diff --git a/.gitattributes b/.gitattributes
index 21d8eca96754804c6487d640534b05ed5f83dad4..0cd58331b2a989b68be4ec5676383437fca8687b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -34,6 +34,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
*.so filter=lfs diff=lfs merge=lfs -text
-build/torch210-cu128-x86_64-windows/activation/_activation_e1b4b08.pyd filter=lfs diff=lfs merge=lfs -text
-media/benches.gif filter=lfs diff=lfs merge=lfs -text
-media/benches.mp4 filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
index 9f52c61476792e17c42362b1b3aae4a751fd6cbe..4d0561e803c34dc86855ced8fdaa54ca836ef8e2 100644
--- a/README.md
+++ b/README.md
@@ -1,26 +1,7 @@
---
tags:
- - kernels
+- kernel
---
-
-
## Activation
-Activation kernels from [vLLM](https://github.com/vllm-project/vllm/blob/main/csrc/activation_kernels.cu).
-
-Kernel source: https://github.com/huggingface/kernels-community/tree/main/activation
-
-### Performance
-
-
-
-
-
-
-
-
-
-
-
-
-
+Activation kernels from [vLLM](https://github.com/vllm-project/vllm/blob/main/csrc/activation_kernels.cu).
\ No newline at end of file
diff --git a/activation/activation_kernels.cu b/activation/activation_kernels.cu
new file mode 100644
index 0000000000000000000000000000000000000000..839dc36ba4e29d34144b42b4f1ed40be03afd5eb
--- /dev/null
+++ b/activation/activation_kernels.cu
@@ -0,0 +1,204 @@
+#include <ATen/cuda/CUDAContext.h>
+#include <torch/all.h>
+#include <c10/cuda/CUDAGuard.h>
+
+#include <cmath>
+
+#include "cuda_compat.h"
+#include "dispatch_utils.h"
+
+namespace vllm {
+
+// Activation and gating kernel template.
+template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&)>
+__global__ void act_and_mul_kernel(
+ scalar_t* __restrict__ out, // [..., d]
+ const scalar_t* __restrict__ input, // [..., 2, d]
+ const int d) {
+ const int64_t token_idx = blockIdx.x;
+ for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) {
+ const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]);
+ const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]);
+ out[token_idx * d + idx] = ACT_FN(x) * y;
+ }
+}
+
+template <typename T>
+__device__ __forceinline__ T silu_kernel(const T& x) {
+ // x * sigmoid(x)
+ return (T)(((float)x) / (1.0f + expf((float)-x)));
+}
+
+template <typename T>
+__device__ __forceinline__ T gelu_kernel(const T& x) {
+ // Equivalent to PyTorch GELU with 'none' approximation.
+ // Refer to:
+ // https://github.com/pytorch/pytorch/blob/8ac9b20d4b090c213799e81acf48a55ea8d437d6/aten/src/ATen/native/cuda/ActivationGeluKernel.cu#L36-L38
+ const float f = (float)x;
+ constexpr float ALPHA = M_SQRT1_2;
+ return (T)(f * 0.5f * (1.0f + ::erf(f * ALPHA)));
+}
+
+template <typename T>
+__device__ __forceinline__ T gelu_tanh_kernel(const T& x) {
+ // Equivalent to PyTorch GELU with 'tanh' approximation.
+ // Refer to:
+ // https://github.com/pytorch/pytorch/blob/8ac9b20d4b090c213799e81acf48a55ea8d437d6/aten/src/ATen/native/cuda/ActivationGeluKernel.cu#L25-L30
+ const float f = (float)x;
+ constexpr float BETA = M_SQRT2 * M_2_SQRTPI * 0.5f;
+ constexpr float KAPPA = 0.044715;
+ float x_cube = f * f * f;
+ float inner = BETA * (f + KAPPA * x_cube);
+ return (T)(0.5f * f * (1.0f + ::tanhf(inner)));
+}
+
+} // namespace vllm
+
+// Launch activation and gating kernel.
+#define LAUNCH_ACTIVATION_GATE_KERNEL(KERNEL) \
+ int d = input.size(-1) / 2; \
+ int64_t num_tokens = input.numel() / input.size(-1); \
+ dim3 grid(num_tokens); \
+ dim3 block(std::min(d, 1024)); \
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(input)); \
+ const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); \
+ VLLM_DISPATCH_FLOATING_TYPES( \
+ input.scalar_type(), "act_and_mul_kernel", [&] { \
+        vllm::act_and_mul_kernel<scalar_t, KERNEL<scalar_t>>          \
+            <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(),    \
+                                         input.data_ptr<scalar_t>(), d); \
+ });
+
+void silu_and_mul(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., 2 * d]
+{
+ LAUNCH_ACTIVATION_GATE_KERNEL(vllm::silu_kernel);
+}
+
+void gelu_and_mul(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., 2 * d]
+{
+ LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_kernel);
+}
+
+void gelu_tanh_and_mul(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., 2 * d]
+{
+ LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_tanh_kernel);
+}
+
+namespace vllm {
+
+template <typename T>
+__device__ __forceinline__ T fatrelu_kernel(const T& x, const float threshold) {
+ const float f = (float)x;
+ return (T)(f > threshold ? f : 0.0f);
+}
+
+template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&, const float)>
+__global__ void act_and_mul_kernel_with_param(
+ scalar_t* __restrict__ out, const scalar_t* __restrict__ input, const int d,
+ const float param) {
+ const int64_t token_idx = blockIdx.x;
+ for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) {
+ const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]);
+ const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]);
+ out[token_idx * d + idx] = ACT_FN(x, param) * y;
+ }
+}
+
+} // namespace vllm
+
+#define LAUNCH_ACTIVATION_GATE_KERNEL_WITH_PARAM(KERNEL, PARAM) \
+ int d = input.size(-1) / 2; \
+ int64_t num_tokens = input.numel() / input.size(-1); \
+ dim3 grid(num_tokens); \
+ dim3 block(std::min(d, 1024)); \
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(input)); \
+ const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); \
+ VLLM_DISPATCH_FLOATING_TYPES( \
+ input.scalar_type(), "act_and_mul_kernel_with_param", [&] { \
+      vllm::act_and_mul_kernel_with_param<scalar_t, KERNEL<scalar_t>>    \
+          <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(),         \
+                                       input.data_ptr<scalar_t>(), d,    \
+ PARAM); \
+ });
+
+void fatrelu_and_mul(torch::Tensor& out, // [..., d],
+ torch::Tensor& input, // [..., 2 * d]
+ double threshold) {
+ LAUNCH_ACTIVATION_GATE_KERNEL_WITH_PARAM(vllm::fatrelu_kernel, threshold);
+}
+namespace vllm {
+
+// Element-wise activation kernel template.
+template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&)>
+__global__ void activation_kernel(
+ scalar_t* __restrict__ out, // [..., d]
+ const scalar_t* __restrict__ input, // [..., d]
+ const int d) {
+ const int64_t token_idx = blockIdx.x;
+ for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) {
+ const scalar_t x = VLLM_LDG(&input[token_idx * d + idx]);
+ out[token_idx * d + idx] = ACT_FN(x);
+ }
+}
+
+} // namespace vllm
+
+// Launch element-wise activation kernel.
+#define LAUNCH_ACTIVATION_KERNEL(KERNEL) \
+ int d = input.size(-1); \
+ int64_t num_tokens = input.numel() / d; \
+ dim3 grid(num_tokens); \
+ dim3 block(std::min(d, 1024)); \
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(input)); \
+ const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); \
+ VLLM_DISPATCH_FLOATING_TYPES(input.scalar_type(), "activation_kernel", [&] { \
+    vllm::activation_kernel<scalar_t, KERNEL<scalar_t>>               \
+        <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(),        \
+                                     input.data_ptr<scalar_t>(), d);  \
+ });
+
+namespace vllm {
+
+template <typename T>
+__device__ __forceinline__ T gelu_new_kernel(const T& x) {
+ const float x3 = (float)(x * x * x);
+ const T t = (T)tanhf((T)(0.79788456f * (float)(x + (T)(0.044715f * x3))));
+ return ((T)0.5) * x * (((T)1.0) + t);
+}
+
+template <typename T>
+__device__ __forceinline__ T gelu_fast_kernel(const T& x) {
+ const float f = (float)x;
+ const T t =
+ (T)tanhf(((T)(f * 0.79788456f)) * (((T)1.0) + (T)(0.044715f * f) * x));
+ return ((T)0.5) * x * (((T)1.0) + t);
+}
+
+template <typename T>
+__device__ __forceinline__ T gelu_quick_kernel(const T& x) {
+ // x * sigmoid(1.702 * x)
+ return (T)(((float)x) / (1.0f + expf(-1.702f * (float)x)));
+}
+
+} // namespace vllm
+
+void gelu_new(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., d]
+{
+ LAUNCH_ACTIVATION_KERNEL(vllm::gelu_new_kernel);
+}
+
+void gelu_fast(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., d]
+{
+ LAUNCH_ACTIVATION_KERNEL(vllm::gelu_fast_kernel);
+}
+
+void gelu_quick(torch::Tensor& out, // [..., d]
+ torch::Tensor& input) // [..., d]
+{
+ LAUNCH_ACTIVATION_KERNEL(vllm::gelu_quick_kernel);
+}
diff --git a/activation/cuda_compat.h b/activation/cuda_compat.h
new file mode 100644
index 0000000000000000000000000000000000000000..82e55613d915a4a9dda5d73eea3601ad0ef949c5
--- /dev/null
+++ b/activation/cuda_compat.h
@@ -0,0 +1,49 @@
+#pragma once
+
+#ifdef USE_ROCM
+  #include <hip/hip_runtime.h>
+#endif
+
+#ifndef USE_ROCM
+ #define WARP_SIZE 32
+#else
+ #define WARP_SIZE warpSize
+#endif
+
+#ifndef USE_ROCM
+ #define VLLM_LDG(arg) __ldg(arg)
+#else
+ #define VLLM_LDG(arg) *(arg)
+#endif
+
+#ifndef USE_ROCM
+ #define VLLM_SHFL_XOR_SYNC(var, lane_mask) \
+ __shfl_xor_sync(uint32_t(-1), var, lane_mask)
+ #define VLLM_SHFL_XOR_SYNC_WIDTH(var, lane_mask, width) \
+ __shfl_xor_sync(uint32_t(-1), var, lane_mask, width)
+#else
+ #define VLLM_SHFL_XOR_SYNC(var, lane_mask) __shfl_xor(var, lane_mask)
+ #define VLLM_SHFL_XOR_SYNC_WIDTH(var, lane_mask, width) \
+ __shfl_xor(var, lane_mask, width)
+#endif
+
+#ifndef USE_ROCM
+ #define VLLM_SHFL_SYNC(var, src_lane) __shfl_sync(uint32_t(-1), var, src_lane)
+#else
+ #define VLLM_SHFL_SYNC(var, src_lane) __shfl(var, src_lane)
+#endif
+
+#ifndef USE_ROCM
+ #define VLLM_SHFL_DOWN_SYNC(var, lane_delta) \
+ __shfl_down_sync(uint32_t(-1), var, lane_delta)
+#else
+ #define VLLM_SHFL_DOWN_SYNC(var, lane_delta) __shfl_down(var, lane_delta)
+#endif
+
+#ifndef USE_ROCM
+ #define VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \
+ cudaFuncSetAttribute(FUNC, cudaFuncAttributeMaxDynamicSharedMemorySize, VAL)
+#else
+ #define VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize(FUNC, VAL) \
+ hipFuncSetAttribute(FUNC, hipFuncAttributeMaxDynamicSharedMemorySize, VAL)
+#endif
diff --git a/activation/dispatch_utils.h b/activation/dispatch_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..a634e1c3d488676cf9beb11bc3029915e484b163
--- /dev/null
+++ b/activation/dispatch_utils.h
@@ -0,0 +1,35 @@
+/*
+ * Adapted from
+ * https://github.com/pytorch/pytorch/blob/v2.0.1/aten/src/ATen/Dispatch.h
+ */
+#pragma once
+
+#include <torch/all.h>
+
+#define VLLM_DISPATCH_CASE_FLOATING_TYPES(...) \
+ AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__)
+
+#define VLLM_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
+
+#define VLLM_DISPATCH_CASE_FLOATING_AND_BYTE_TYPES(...) \
+ AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__)
+
+#define VLLM_DISPATCH_FLOATING_AND_BYTE_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH(TYPE, NAME, \
+ VLLM_DISPATCH_CASE_FLOATING_AND_BYTE_TYPES(__VA_ARGS__))
+
+#define VLLM_DISPATCH_CASE_INTEGRAL_TYPES(...) \
+ AT_DISPATCH_CASE(at::ScalarType::Byte, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Short, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Int, __VA_ARGS__) \
+ AT_DISPATCH_CASE(at::ScalarType::Long, __VA_ARGS__)
+
+#define VLLM_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \
+ AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_INTEGRAL_TYPES(__VA_ARGS__))
diff --git a/benchmarks/benchmark.py b/benchmarks/benchmark.py
deleted file mode 100644
index 0522bbfdbf8749d671faa8fe91b169dbd2cafa8d..0000000000000000000000000000000000000000
--- a/benchmarks/benchmark.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from kernels.benchmarks import SiluAndMulBenchmark
-
-
-class SiluWorkloads(SiluAndMulBenchmark):
- pass
diff --git a/build.toml b/build.toml
new file mode 100644
index 0000000000000000000000000000000000000000..7da9d632a70edb0699eb77f097b9b1a5ae573c48
--- /dev/null
+++ b/build.toml
@@ -0,0 +1,17 @@
+[general]
+name = "activation"
+
+[torch]
+src = [
+ "torch-ext/torch_binding.cpp",
+ "torch-ext/torch_binding.h"
+]
+
+[kernel.activation]
+cuda-capabilities = [ "7.0", "7.2", "7.5", "8.0", "8.6", "8.7", "8.9", "9.0" ]
+src = [
+ "activation/activation_kernels.cu",
+ "activation/cuda_compat.h",
+ "activation/dispatch_utils.h",
+]
+depends = [ "torch" ]
diff --git a/build/torch210-cu128-x86_64-windows/activation/__init__.py b/build/torch210-cu128-x86_64-windows/activation/__init__.py
deleted file mode 100644
index 379e245ef7515d04bfe4e680e2549fcf8790cc15..0000000000000000000000000000000000000000
--- a/build/torch210-cu128-x86_64-windows/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch210-cu128-x86_64-windows/activation/_activation_e1b4b08.pyd b/build/torch210-cu128-x86_64-windows/activation/_activation_e1b4b08.pyd
deleted file mode 100644
index ca0f54ee392befa7fa8a084bcc730e416a912f23..0000000000000000000000000000000000000000
--- a/build/torch210-cu128-x86_64-windows/activation/_activation_e1b4b08.pyd
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d741006dd4fe8a85ed461fa3727d4d9f1b438083d2f1075ae54650bbdd2dc179
-size 2463744
diff --git a/build/torch210-cu128-x86_64-windows/activation/_ops.py b/build/torch210-cu128-x86_64-windows/activation/_ops.py
deleted file mode 100644
index 110a36d47839efd80d8d58e5cce311e50d684990..0000000000000000000000000000000000000000
--- a/build/torch210-cu128-x86_64-windows/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_e1b4b08
-ops = torch.ops._activation_e1b4b08
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_e1b4b08::{op_name}"
\ No newline at end of file
diff --git a/build/torch210-cu128-x86_64-windows/activation/layers.py b/build/torch210-cu128-x86_64-windows/activation/layers.py
deleted file mode 100644
index 3dbfa19f89f2514b94e7b35d528a1e76ec4da7a3..0000000000000000000000000000000000000000
--- a/build/torch210-cu128-x86_64-windows/activation/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch210-cu128-x86_64-windows/metadata.json b/build/torch210-cu128-x86_64-windows/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch210-cu128-x86_64-windows/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/__init__.py b/build/torch210-cxx11-cu126-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu126-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so b/build/torch210-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index 56710735c8054926018bdc657c86acf38357f2ea..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5aaa5c4a7e3fe6967d10f5bc25c899507a57f2fd941c9aff02bd9ded610d9542
-size 3126824
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/_ops.py b/build/torch210-cxx11-cu126-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu126-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch210-cxx11-cu126-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu126-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/layers.py b/build/torch210-cxx11-cu126-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu126-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch210-cxx11-cu126-x86_64-linux/metadata.json b/build/torch210-cxx11-cu126-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu126-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so b/build/torch210-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index 2b154e7fcc3fa0cfaa0080d434d825559e56d1a1..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0e4d411f1093b8e4ec62529e9eb94550fc4ff8efc073e20eaedd7ea587885390
-size 4406608
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/_ops.py b/build/torch210-cxx11-cu128-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu128-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch210-cxx11-cu128-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu128-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/layers.py b/build/torch210-cxx11-cu128-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu128-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/metadata.json b/build/torch210-cxx11-cu128-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu128-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so b/build/torch210-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index d78e3b3eb68814951f17530588ae4abd731837eb..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f843d365b4eadc5e5b587becaa8ba581a0e0007adf7f1fba59442dd8acf4cd42
-size 4190152
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/_ops.py b/build/torch210-cxx11-cu130-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu130-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/activation/__init__.py b/build/torch210-cxx11-cu130-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu130-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/layers.py b/build/torch210-cxx11-cu130-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu130-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/metadata.json b/build/torch210-cxx11-cu130-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch210-cxx11-cu130-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch210-metal-aarch64-darwin/__init__.py b/build/torch210-metal-aarch64-darwin/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch210-metal-aarch64-darwin/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch210-metal-aarch64-darwin/_activation_63b875f.abi3.so b/build/torch210-metal-aarch64-darwin/_activation_63b875f.abi3.so
deleted file mode 100644
index ba3b331d8ec8fbebaa26c880f2be4824ae26de15..0000000000000000000000000000000000000000
--- a/build/torch210-metal-aarch64-darwin/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:40b08339eb57c5db3a676d69eafc6d1be7cf14e71e57a544289e8922ab7c118c
-size 221272
diff --git a/build/torch210-metal-aarch64-darwin/_ops.py b/build/torch210-metal-aarch64-darwin/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch210-metal-aarch64-darwin/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch210-metal-aarch64-darwin/activation/__init__.py b/build/torch210-metal-aarch64-darwin/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch210-metal-aarch64-darwin/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch210-metal-aarch64-darwin/layers.py b/build/torch210-metal-aarch64-darwin/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch210-metal-aarch64-darwin/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch210-metal-aarch64-darwin/metadata.json b/build/torch210-metal-aarch64-darwin/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch210-metal-aarch64-darwin/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__init__.py b/build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py
similarity index 90%
rename from build/torch28-cxx11-cu126-aarch64-linux/activation/__init__.py
rename to build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py
index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 100644
--- a/build/torch28-cxx11-cu126-aarch64-linux/activation/__init__.py
+++ b/build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py
@@ -10,11 +10,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
return out
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_and_mul(out, x)
return out
diff --git a/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_o63kkyjirmkf4.abi3.so b/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_o63kkyjirmkf4.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..1442eb73f43c4768d2b8f9b916943743256af106
--- /dev/null
+++ b/build/torch25-cxx11-cu118-x86_64-linux/activation/_activation_o63kkyjirmkf4.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d50cdabfbed1df74e921ac34ff00bca0555977b14ef8082ddae7b1f30985a494
+size 2370160
diff --git a/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ee3c40cd38699ce7744be53b0531fe4fa505996
--- /dev/null
+++ b/build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_o63kkyjirmkf4
+ops = torch.ops._activation_o63kkyjirmkf4
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_o63kkyjirmkf4::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/layers.py b/build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py
similarity index 51%
rename from build/torch28-cxx11-cu126-aarch64-linux/activation/layers.py
rename to build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py
index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f 100644
--- a/build/torch28-cxx11-cu126-aarch64-linux/activation/layers.py
+++ b/build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py
@@ -5,17 +5,6 @@ from ._ops import ops
class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor):
d = x.shape[-1] // 2
output_shape = x.shape[:-1] + (d,)
@@ -24,38 +13,7 @@ class SiluAndMul(nn.Module):
return out
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor):
d = x.shape[-1] // 2
output_shape = x.shape[:-1] + (d,)
@@ -65,8 +23,6 @@ class GeluAndMul(nn.Module):
class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor):
d = x.shape[-1] // 2
output_shape = x.shape[:-1] + (d,)
@@ -76,19 +32,6 @@ class GeluTanhAndMul(nn.Module):
class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
def __init__(self, threshold: float = 0.0):
super().__init__()
self.threshold = threshold
@@ -102,8 +45,6 @@ class FatreluAndMul(nn.Module):
class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = torch.empty_like(x)
ops.gelu_fast(out, x)
@@ -111,8 +52,6 @@ class FastGELU(nn.Module):
class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = torch.empty_like(x)
ops.gelu_new(out, x)
@@ -120,8 +59,6 @@ class NewGELU(nn.Module):
class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = torch.empty_like(x)
ops.gelu_quick(out, x)
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__init__.py b/build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py
similarity index 90%
rename from build/torch28-cxx11-cu128-aarch64-linux/activation/__init__.py
rename to build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py
index 1c4f207354093c6ef83eb5d7f3a5a3b22b95d357..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 100644
--- a/build/torch28-cxx11-cu128-aarch64-linux/activation/__init__.py
+++ b/build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py
@@ -10,11 +10,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
return out
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_and_mul(out, x)
return out
diff --git a/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_vrl36m2ejer54.abi3.so b/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_vrl36m2ejer54.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..e6b6260c4378717d4369cd2577a00350960e317c
--- /dev/null
+++ b/build/torch25-cxx11-cu121-x86_64-linux/activation/_activation_vrl36m2ejer54.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bd0709ef09c8f0c18d1dc4a36c8096c59459bece61f5f5dbea95d1e73f54d44
+size 2393264
diff --git a/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd5eee20b4c9a70e4f59e7198a0a28c5dfa06244
--- /dev/null
+++ b/build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_vrl36m2ejer54
+ops = torch.ops._activation_vrl36m2ejer54
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_vrl36m2ejer54::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/layers.py b/build/torch25-cxx11-cu121-x86_64-linux/activation/layers.py
similarity index 51%
rename from build/torch28-cxx11-cu128-aarch64-linux/activation/layers.py
rename to build/torch25-cxx11-cu121-x86_64-linux/activation/layers.py
index 45b31181ffb80509a85d729a7f7ee86fc2cf014a..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f 100644
--- a/build/torch28-cxx11-cu128-aarch64-linux/activation/layers.py
+++ b/build/torch25-cxx11-cu121-x86_64-linux/activation/layers.py
@@ -5,17 +5,6 @@ from ._ops import ops
class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor):
d = x.shape[-1] // 2
output_shape = x.shape[:-1] + (d,)
@@ -24,38 +13,7 @@ class SiluAndMul(nn.Module):
return out
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor):
d = x.shape[-1] // 2
output_shape = x.shape[:-1] + (d,)
@@ -65,8 +23,6 @@ class GeluAndMul(nn.Module):
class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor):
d = x.shape[-1] // 2
output_shape = x.shape[:-1] + (d,)
@@ -76,19 +32,6 @@ class GeluTanhAndMul(nn.Module):
class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
def __init__(self, threshold: float = 0.0):
super().__init__()
self.threshold = threshold
@@ -102,8 +45,6 @@ class FatreluAndMul(nn.Module):
class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = torch.empty_like(x)
ops.gelu_fast(out, x)
@@ -111,8 +52,6 @@ class FastGELU(nn.Module):
class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = torch.empty_like(x)
ops.gelu_new(out, x)
@@ -120,8 +59,6 @@ class NewGELU(nn.Module):
class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = torch.empty_like(x)
ops.gelu_quick(out, x)
diff --git a/build/torch210-cxx11-cu130-x86_64-linux/__init__.py b/build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py
similarity index 69%
rename from build/torch210-cxx11-cu130-x86_64-linux/__init__.py
rename to build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 100644
--- a/build/torch210-cxx11-cu130-x86_64-linux/__init__.py
+++ b/build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py
@@ -10,11 +10,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
return out
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_and_mul(out, x)
return out
@@ -30,20 +25,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0)
return out
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_fast(out, x)
return out
@@ -61,15 +42,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
__all__ = [
"silu_and_mul",
- "mul_and_silu",
"gelu_and_mul",
"gelu_tanh_and_mul",
"fatrelu_and_mul",
"gelu_fast",
"gelu_new",
"gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
"layers",
]
diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_va3moa75vw7c2.abi3.so b/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_va3moa75vw7c2.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..2bfbc3667e483ce8441b310889b63614133f6334
--- /dev/null
+++ b/build/torch25-cxx11-cu124-x86_64-linux/activation/_activation_va3moa75vw7c2.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8353447f64e7d2df1a6a341d9c53bced53abef267f079923ae774170d0d57c53
+size 2427936
diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py b/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..05fa036d769ea858865c92b972c68a158899eca1
--- /dev/null
+++ b/build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_va3moa75vw7c2
+ops = torch.ops._activation_va3moa75vw7c2
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_va3moa75vw7c2::{op_name}"
\ No newline at end of file
diff --git a/build/torch25-cxx11-cu124-x86_64-linux/activation/layers.py b/build/torch25-cxx11-cu124-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f
--- /dev/null
+++ b/build/torch25-cxx11-cu124-x86_64-linux/activation/layers.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
diff --git a/build/torch210-cxx11-cu128-x86_64-linux/__init__.py b/build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py
similarity index 69%
rename from build/torch210-cxx11-cu128-x86_64-linux/__init__.py
rename to build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78 100644
--- a/build/torch210-cxx11-cu128-x86_64-linux/__init__.py
+++ b/build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py
@@ -10,11 +10,6 @@ def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
return out
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_and_mul(out, x)
return out
@@ -30,20 +25,6 @@ def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0)
return out
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
ops.gelu_fast(out, x)
return out
@@ -61,15 +42,11 @@ def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
__all__ = [
"silu_and_mul",
- "mul_and_silu",
"gelu_and_mul",
"gelu_tanh_and_mul",
"fatrelu_and_mul",
"gelu_fast",
"gelu_new",
"gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
"layers",
]
diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_qr3gs3eckeig4.abi3.so b/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_qr3gs3eckeig4.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..fc35d5f1bdad2b294897ace78c80c2913e8bd793
--- /dev/null
+++ b/build/torch25-cxx98-cu118-x86_64-linux/activation/_activation_qr3gs3eckeig4.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df184a6315118d787a1bd6b435cb45f1ca7828445a1f1c0e55c57645cfbba43a
+size 2362600
diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..11bf3a1e13ad4f53348d55ce0b8e727307534d67
--- /dev/null
+++ b/build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_qr3gs3eckeig4
+ops = torch.ops._activation_qr3gs3eckeig4
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_qr3gs3eckeig4::{op_name}"
\ No newline at end of file
diff --git a/build/torch25-cxx98-cu118-x86_64-linux/activation/layers.py b/build/torch25-cxx98-cu118-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f
--- /dev/null
+++ b/build/torch25-cxx98-cu118-x86_64-linux/activation/layers.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py b/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78
--- /dev/null
+++ b/build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py
@@ -0,0 +1,52 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so b/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..e24099a45ad0e0ab481769c24c749947c164b07c
--- /dev/null
+++ b/build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccb13cfc2e45cf483e8b9f77f1760f28b48bcf185508d51b32d45bc759c4e8bb
+size 2385440
diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbe888c4ec71536f08f7de2d823b75d9bbac0173
--- /dev/null
+++ b/build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_p7gbzt25w3zg2
+ops = torch.ops._activation_p7gbzt25w3zg2
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_p7gbzt25w3zg2::{op_name}"
\ No newline at end of file
diff --git a/build/torch25-cxx98-cu121-x86_64-linux/activation/layers.py b/build/torch25-cxx98-cu121-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f
--- /dev/null
+++ b/build/torch25-cxx98-cu121-x86_64-linux/activation/layers.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py b/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78
--- /dev/null
+++ b/build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py
@@ -0,0 +1,52 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so b/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..dc1fd3cc2e28051b8a263a5baf287b794d741f41
--- /dev/null
+++ b/build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f8048853e8cb06e8574a9a9497800d2be438f7989d79f44dcf2e0ced38a75a9
+size 2420192
diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py b/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c022ad4f749236320938c6ae13079a445e860ba
--- /dev/null
+++ b/build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_jg7yaigtn7wco
+ops = torch.ops._activation_jg7yaigtn7wco
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_jg7yaigtn7wco::{op_name}"
\ No newline at end of file
diff --git a/build/torch25-cxx98-cu124-x86_64-linux/activation/layers.py b/build/torch25-cxx98-cu124-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f
--- /dev/null
+++ b/build/torch25-cxx98-cu124-x86_64-linux/activation/layers.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78
--- /dev/null
+++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/__init__.py
@@ -0,0 +1,52 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..951cd024fc65dcaf31f0ce123d7c56538db90255
--- /dev/null
+++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cde5439e78ba0e1aaa1937d798b214b46d38cbab8e4384b93a22239fed1a4dd4
+size 2370264
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4538ecbd1302013d2026d413f07fefa1e3ed1ba
--- /dev/null
+++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_ncisyrun7guwk
+ops = torch.ops._activation_ncisyrun7guwk
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_ncisyrun7guwk::{op_name}"
\ No newline at end of file
diff --git a/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f
--- /dev/null
+++ b/build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78
--- /dev/null
+++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/__init__.py
@@ -0,0 +1,52 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..7cc13c8b18b95f7a24b708e41f20daa15cf8e5ba
--- /dev/null
+++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6bd20d411c51fc8729b15cab6a60c5c9185222474aa035489e1bff299d76682
+size 2428040
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc135b9b87ed568acd3b7ae002760780202297ab
--- /dev/null
+++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_ochhfvlnc3vyc
+ops = torch.ops._activation_ochhfvlnc3vyc
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_ochhfvlnc3vyc::{op_name}"
\ No newline at end of file
diff --git a/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f
--- /dev/null
+++ b/build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78
--- /dev/null
+++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/__init__.py
@@ -0,0 +1,52 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..ed72afe1b709df6b64ae4daf96dfacf5397334e1
--- /dev/null
+++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41c18b20c2bf8c49d2d3088a9bc1aad4293df0b57eafc9b141a9e8e595fe551a
+size 2436672
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..03feb54a67ee96a3181145a654e9c1d3432d3c83
--- /dev/null
+++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_u6vnqubnicksq
+ops = torch.ops._activation_u6vnqubnicksq
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_u6vnqubnicksq::{op_name}"
\ No newline at end of file
diff --git a/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f
--- /dev/null
+++ b/build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78
--- /dev/null
+++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/__init__.py
@@ -0,0 +1,52 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..d9c4fba021babdd0966b329da77216730a212c19
--- /dev/null
+++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfbcd5da358cd5cb7982d19c8880cf4db6f08b46622a7a953f755ad59e4e1492
+size 2362752
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ec67ec6be213233dc83cb83dcd9e3d8cade5a98
--- /dev/null
+++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_2vn6ty3gfqfb6
+ops = torch.ops._activation_2vn6ty3gfqfb6
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_2vn6ty3gfqfb6::{op_name}"
\ No newline at end of file
diff --git a/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f
--- /dev/null
+++ b/build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/__init__.py
@@ -0,0 +1,52 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..486879e6269e0c7d0763ac11d82a100dec1b1b91
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1bc928823117c800904bcd3492bf1a0c65a32f6d8a842dc039f55e29831ab49
+size 2420344
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f4e8d31b42ed7be77b8eaef9aa29251327009bf
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_myvteedxdpqc6
+ops = torch.ops._activation_myvteedxdpqc6
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_myvteedxdpqc6::{op_name}"
\ No newline at end of file
diff --git a/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f
--- /dev/null
+++ b/build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/__init__.py
@@ -0,0 +1,52 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.silu_and_mul(out, x)
+ return out
+
+
+def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
+ ops.fatrelu_and_mul(out, x, threshold)
+ return out
+
+
+def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_fast(out, x)
+ return out
+
+
+def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_new(out, x)
+ return out
+
+
+def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
+ ops.gelu_quick(out, x)
+ return out
+
+
+__all__ = [
+ "silu_and_mul",
+ "gelu_and_mul",
+ "gelu_tanh_and_mul",
+ "fatrelu_and_mul",
+ "gelu_fast",
+ "gelu_new",
+ "gelu_quick",
+ "layers",
+]
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so
new file mode 100755
index 0000000000000000000000000000000000000000..42ad3291140b013999afb683ec84c23804759e46
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:474727e434a9cd4ec984a6da7124992ead4ca0fefce9581d0fd503e36c065aed
+size 2424888
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6d4e4c91a867d657f287510c40366bccef86c94
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py
@@ -0,0 +1,9 @@
+import torch
+from . import _activation_rbswus6emrhm2
+ops = torch.ops._activation_rbswus6emrhm2
+
+def add_op_namespace_prefix(op_name: str):
+ """
+ Prefix op by namespace.
+ """
+ return f"_activation_rbswus6emrhm2::{op_name}"
\ No newline at end of file
diff --git a/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py b/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f
--- /dev/null
+++ b/build/torch26-cxx98-cu126-x86_64-linux/activation/layers.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
+class SiluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.silu_and_mul(out, x)
+ return out
+
+
+class GeluAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_and_mul(out, x)
+ return out
+
+
+class GeluTanhAndMul(nn.Module):
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.gelu_tanh_and_mul(out, x)
+ return out
+
+
+class FatreluAndMul(nn.Module):
+ def __init__(self, threshold: float = 0.0):
+ super().__init__()
+ self.threshold = threshold
+
+ def forward(self, x: torch.Tensor):
+ d = x.shape[-1] // 2
+ output_shape = x.shape[:-1] + (d,)
+ out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+ ops.fatrelu_and_mul(out, x, self.threshold)
+ return out
+
+
+class FastGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_fast(out, x)
+ return out
+
+
+class NewGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_new(out, x)
+ return out
+
+
+class QuickGELU(nn.Module):
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ out = torch.empty_like(x)
+ ops.gelu_quick(out, x)
+ return out
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index bbf3ad846a76e365312ad965559a177976801396..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index 47765ef8e985a500bbb3e25990387a1f1f15c767..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index de62862184381714910c79ecdf8db3ca14f8a753..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu118-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_beeaae6.abi3.so
deleted file mode 100755
index c6c9665f880b574481be0f6464ac7637e732df84..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_beeaae6.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ce06ec284ecd4ac5423d3822a60cd9eeb686d0054b38d66567de73e1137b0567
-size 2773632
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py
deleted file mode 100644
index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_beeaae6
-ops = torch.ops._activation_beeaae6
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_beeaae6::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py
deleted file mode 100644
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu126-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 29e76b5c619af9b19c5650edcfd4f63c4725d35f..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index f54053b63e8c2b7598967b6ca9739ecc85d6142a..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index 4d4a3c1172a3a2b4c954199c9762b3251d1c468c..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu126-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so
deleted file mode 100755
index e9e9102689a8ddf42f881abedcd19e137f22d5e4..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu126-x86_64-linux/activation/_activation_beeaae6.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a529bd105aca5081398d63329e829b6b159570424cd654d3a9f275ca9a720e82
-size 2852200
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py
deleted file mode 100644
index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu126-x86_64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_beeaae6
-ops = torch.ops._activation_beeaae6
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_beeaae6::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py
deleted file mode 100644
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu126-x86_64-linux/activation/layers.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py b/build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-aarch64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 390f279894bed7ce9346ede4953b9ffc9e1b1808..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index 86ca448fc1e6e7e119172b94f978b4a88aeda3e1..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index cbcd1da77da3529c73226d8ed8decfae8b9e5436..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so
deleted file mode 100644
index 4df8f1606a76b66c06d538cd25db8e894d282405..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:34bdeb9ab72686850aef0a16b225b1b956162edb2cf46cba65c5e5b92ae267ae
-size 4207000
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py b/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py
deleted file mode 100644
index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-aarch64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_320b408
-ops = torch.ops._activation_320b408
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_320b408::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py b/build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py
deleted file mode 100644
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-aarch64-linux/activation/layers.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 364976ff5017b183a827c0dfcda90becfbab0e7c..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index 008e1b91db1ae539587989af1a212f9cd38a1ae2..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index d00f03a5b9a4944132d13ac0986acc2c54e0ca3c..0000000000000000000000000000000000000000
Binary files a/build/torch27-cxx11-cu128-x86_64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so b/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so
deleted file mode 100755
index 6d8adc0f26f3b10cbc1b441b74bc7f49c0ebdaae..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-x86_64-linux/activation/_activation_beeaae6.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0f2cffcb6b5b9a49f03a2df46fc2ad36765676edecb468c233e78e1f5e21e206
-size 4127872
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py
deleted file mode 100644
index 4d722bffa37106dd2bfdb75db14408c7eecefcb0..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-x86_64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_beeaae6
-ops = torch.ops._activation_beeaae6
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_beeaae6::{op_name}"
\ No newline at end of file
diff --git a/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py b/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py
deleted file mode 100644
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000
--- a/build/torch27-cxx11-cu128-x86_64-linux/activation/layers.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 7aaa364368efe0e765de132c08296d189a969ede..0000000000000000000000000000000000000000
Binary files a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index cc7b128cfd05527bc856b66cdaf7d33691835eae..0000000000000000000000000000000000000000
Binary files a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index e547c241f319a637fa590b09ad35c1592aacce40..0000000000000000000000000000000000000000
Binary files a/build/torch28-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so b/build/torch28-cxx11-cu126-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so
deleted file mode 100755
index 8121d3da5057e1d53e4dee4b60de1e13285bd3e0..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:02b62f5d045f370c3fb7c0e7ef458165feb987fba186b8cb9aee55c735a82e93
-size 2699928
diff --git a/build/torch28-cxx11-cu126-aarch64-linux/activation/_ops.py b/build/torch28-cxx11-cu126-aarch64-linux/activation/_ops.py
deleted file mode 100644
index 0f883290f823dd4b9ad1432d6644d25bcd3a4acf..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-aarch64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_0c3eb4e_dirty
-ops = torch.ops._activation_0c3eb4e_dirty
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_0c3eb4e_dirty::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/__init__.py b/build/torch28-cxx11-cu126-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/_activation_f8d6759.abi3.so b/build/torch28-cxx11-cu126-x86_64-linux/_activation_f8d6759.abi3.so
deleted file mode 100644
index cabcacd16040aad8134b2892ea8f1f9781a9a78b..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/_activation_f8d6759.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:cf76431ff46ef5bc002ce8813eeed3ae9618a15094d98ef4b164f7a10a54f0bc
-size 3121056
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/_ops.py b/build/torch28-cxx11-cu126-x86_64-linux/_ops.py
deleted file mode 100644
index 140c6e96b3f93ce5b359648edac4dcb2913b8324..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_f8d6759
-ops = torch.ops._activation_f8d6759
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_f8d6759::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/layers.py b/build/torch28-cxx11-cu126-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch28-cxx11-cu126-x86_64-linux/metadata.json b/build/torch28-cxx11-cu126-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu126-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index bdb5a121a09f628a672c404f5207f691347f83c5..0000000000000000000000000000000000000000
Binary files a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index 0d8c166048d114380e068ca6448ab46ef96da034..0000000000000000000000000000000000000000
Binary files a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index ea0551b7b1c5e408b9875b62598f6f5f0b489a30..0000000000000000000000000000000000000000
Binary files a/build/torch28-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so b/build/torch28-cxx11-cu128-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so
deleted file mode 100755
index f1d23623c037de97ee0207fe5f750d8ba9863d3c..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-aarch64-linux/activation/_activation_0c3eb4e_dirty.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2f7fe0a00eaf2e228f237ee3058ac9eb2c6fbc4927b1276d0f566bb05bb043b9
-size 3683080
diff --git a/build/torch28-cxx11-cu128-aarch64-linux/activation/_ops.py b/build/torch28-cxx11-cu128-aarch64-linux/activation/_ops.py
deleted file mode 100644
index 0f883290f823dd4b9ad1432d6644d25bcd3a4acf..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-aarch64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_0c3eb4e_dirty
-ops = torch.ops._activation_0c3eb4e_dirty
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_0c3eb4e_dirty::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/__init__.py b/build/torch28-cxx11-cu128-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/_activation_f8d6759.abi3.so b/build/torch28-cxx11-cu128-x86_64-linux/_activation_f8d6759.abi3.so
deleted file mode 100644
index 564e3aa415dbcea5a132bfb14301b4900373fb58..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/_activation_f8d6759.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2dc0a42d5ebcae09615265a3635bb90d33c76d9179fcfcec17fb2fc5cb16b7f5
-size 4400792
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/_ops.py b/build/torch28-cxx11-cu128-x86_64-linux/_ops.py
deleted file mode 100644
index 140c6e96b3f93ce5b359648edac4dcb2913b8324..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_f8d6759
-ops = torch.ops._activation_f8d6759
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_f8d6759::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/layers.py b/build/torch28-cxx11-cu128-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch28-cxx11-cu128-x86_64-linux/metadata.json b/build/torch28-cxx11-cu128-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu128-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-aarch64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index e53c600baf751d47e3c75f0ea262aaa74cbaa2a0..0000000000000000000000000000000000000000
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index cfe526dc3c92a5c7b1a46084e58d4448fc74b15b..0000000000000000000000000000000000000000
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index 878be1d140d35a1a92eb1b870cd3ccc0bbb65128..0000000000000000000000000000000000000000
Binary files a/build/torch28-cxx11-cu129-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_320b408.abi3.so
deleted file mode 100644
index 485825618d1d0c2e93123fe5197999883b59b748..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-aarch64-linux/activation/_activation_320b408.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3af83bae80c8641200010ba586e5a2cac271fa4fcd344e3532ea7d5094fd7c17
-size 4275744
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py
deleted file mode 100644
index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-aarch64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_320b408
-ops = torch.ops._activation_320b408
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_320b408::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py b/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py
deleted file mode 100644
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-aarch64-linux/activation/layers.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/__init__.py b/build/torch28-cxx11-cu129-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/_activation_f8d6759.abi3.so b/build/torch28-cxx11-cu129-x86_64-linux/_activation_f8d6759.abi3.so
deleted file mode 100644
index ff0d1df159bdd317b6293331073a9aab2d4bd06c..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/_activation_f8d6759.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:48d0f9b82abd2e6d7154889814140b789e2d4452aac1296d921c9a2d4ab19e91
-size 4438672
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/_ops.py b/build/torch28-cxx11-cu129-x86_64-linux/_ops.py
deleted file mode 100644
index 140c6e96b3f93ce5b359648edac4dcb2913b8324..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_f8d6759
-ops = torch.ops._activation_f8d6759
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_f8d6759::{op_name}"
\ No newline at end of file
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py b/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/layers.py b/build/torch28-cxx11-cu129-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch28-cxx11-cu129-x86_64-linux/metadata.json b/build/torch28-cxx11-cu129-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch28-cxx11-cu129-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__init__.py b/build/torch29-cxx11-cu126-aarch64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-aarch64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 60dc82724c779cfa41bd9b8dcf39c036e2a50109..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index 48cda67561066b31e84ee5ecebcf0ef61e1ad322..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index 0082ca0b0e28577622a3e430602fabe010369318..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu126-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch29-cxx11-cu126-aarch64-linux/activation/_activation_320b408.abi3.so
deleted file mode 100644
index 41c75640cfdc7eeff3d57f4a6d403f7e7f10b8d8..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-aarch64-linux/activation/_activation_320b408.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f9c24e0eb75a09a9fc19e7096276d560226f198617291681c1a18e94002a629e
-size 2963480
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/_ops.py b/build/torch29-cxx11-cu126-aarch64-linux/activation/_ops.py
deleted file mode 100644
index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-aarch64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_320b408
-ops = torch.ops._activation_320b408
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_320b408::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu126-aarch64-linux/activation/layers.py b/build/torch29-cxx11-cu126-aarch64-linux/activation/layers.py
deleted file mode 100644
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-aarch64-linux/activation/layers.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/__init__.py b/build/torch29-cxx11-cu126-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so b/build/torch29-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index 3bb70b2a77f8c7dd8f0125e896cfca9359138ff9..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c84b682f2dd4437835661f57f031d96865871f6f4ab25f5651d4f577acee1326
-size 3121128
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/_ops.py b/build/torch29-cxx11-cu126-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/activation/__init__.py b/build/torch29-cxx11-cu126-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/layers.py b/build/torch29-cxx11-cu126-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch29-cxx11-cu126-x86_64-linux/metadata.json b/build/torch29-cxx11-cu126-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu126-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__init__.py b/build/torch29-cxx11-cu128-aarch64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-aarch64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 4d338b4d5170fa0130189f67e65562998f8f42be..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index be7ffd679d4afbc36ea076dbc57e3162a60bd409..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index e50041e74611417f4e4037e568a9e041780a5e32..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu128-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch29-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so
deleted file mode 100644
index dc83e4989904884309410757826ec095ea0fdfe4..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-aarch64-linux/activation/_activation_320b408.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:08ee3dfa4d481eaf44ac3c11a0843598c05950f779dba66abd468fecb7839b32
-size 4208760
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/_ops.py b/build/torch29-cxx11-cu128-aarch64-linux/activation/_ops.py
deleted file mode 100644
index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-aarch64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_320b408
-ops = torch.ops._activation_320b408
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_320b408::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu128-aarch64-linux/activation/layers.py b/build/torch29-cxx11-cu128-aarch64-linux/activation/layers.py
deleted file mode 100644
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-aarch64-linux/activation/layers.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/__init__.py b/build/torch29-cxx11-cu128-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so b/build/torch29-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index e2e49fb0c5b136351663cc36a368639afff8a47c..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3afed8f50b04121c408e2b7fc8f4920015ba696b97e54be8e165cbbdd7039d6b
-size 4400864
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/_ops.py b/build/torch29-cxx11-cu128-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/activation/__init__.py b/build/torch29-cxx11-cu128-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/layers.py b/build/torch29-cxx11-cu128-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch29-cxx11-cu128-x86_64-linux/metadata.json b/build/torch29-cxx11-cu128-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu128-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__init__.py b/build/torch29-cxx11-cu130-aarch64-linux/activation/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-aarch64-linux/activation/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc b/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc
deleted file mode 100644
index 21696c8710d6b717d92ebd34545a9ac97cc44942..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/__init__.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc b/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc
deleted file mode 100644
index 1856969205a3825653d4be5e4c267a9585ff6594..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/_ops.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc b/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc
deleted file mode 100644
index a8e0f48d49bb34730201d17d0795310d829e20cb..0000000000000000000000000000000000000000
Binary files a/build/torch29-cxx11-cu130-aarch64-linux/activation/__pycache__/layers.cpython-313.pyc and /dev/null differ
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/_activation_320b408.abi3.so b/build/torch29-cxx11-cu130-aarch64-linux/activation/_activation_320b408.abi3.so
deleted file mode 100644
index 02267d619c1ad4c0bb7f84b243e5456c6bf7c798..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-aarch64-linux/activation/_activation_320b408.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:73748b54059552f5983322f7dedc36ed349b38ad6fb9318301bb4965b1fe49aa
-size 4094968
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/_ops.py b/build/torch29-cxx11-cu130-aarch64-linux/activation/_ops.py
deleted file mode 100644
index 0fe83704e6d8850cb94dd0434fb763bff8e7e953..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-aarch64-linux/activation/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_320b408
-ops = torch.ops._activation_320b408
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_320b408::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu130-aarch64-linux/activation/layers.py b/build/torch29-cxx11-cu130-aarch64-linux/activation/layers.py
deleted file mode 100644
index 0aec9c95fa75e4d3ff699ce69fc6618798b179c1..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-aarch64-linux/activation/layers.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/__init__.py b/build/torch29-cxx11-cu130-x86_64-linux/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so b/build/torch29-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so
deleted file mode 100644
index fcd78de80c8ca89e556f4f7255428b9dbbfaaf2d..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6bc5534a57cbe83a6ebc51a13bce94eab7c06ce5b4d41eb4e9db83f77ae64902
-size 4180240
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/_ops.py b/build/torch29-cxx11-cu130-x86_64-linux/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/activation/__init__.py b/build/torch29-cxx11-cu130-x86_64-linux/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/layers.py b/build/torch29-cxx11-cu130-x86_64-linux/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch29-cxx11-cu130-x86_64-linux/metadata.json b/build/torch29-cxx11-cu130-x86_64-linux/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch29-cxx11-cu130-x86_64-linux/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/build/torch29-metal-aarch64-darwin/__init__.py b/build/torch29-metal-aarch64-darwin/__init__.py
deleted file mode 100644
index 1a9cd15a0a75f95c5ab956fb05c2a9860f218156..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/__init__.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu_and_mul(out, x)
- return out
-
-
-def mul_and_silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.mul_and_silu(out, x)
- return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_and_mul(out, x)
- return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
- ops.fatrelu_and_mul(out, x, threshold)
- return out
-
-
-def gelu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu(out, x)
- return out
-
-def silu(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.silu(out, x)
- return out
-
-
-def gelu_tanh(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_tanh(out, x)
- return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_fast(out, x)
- return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_new(out, x)
- return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
- ops.gelu_quick(out, x)
- return out
-
-
-__all__ = [
- "silu_and_mul",
- "mul_and_silu",
- "gelu_and_mul",
- "gelu_tanh_and_mul",
- "fatrelu_and_mul",
- "gelu_fast",
- "gelu_new",
- "gelu_quick",
- "gelu_tanh",
- "silu",
- "gelu",
- "layers",
-]
diff --git a/build/torch29-metal-aarch64-darwin/_activation_63b875f.abi3.so b/build/torch29-metal-aarch64-darwin/_activation_63b875f.abi3.so
deleted file mode 100644
index 986b7947b413077b8d8acf3967a52ee556212268..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/_activation_63b875f.abi3.so
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:884e887217a67931f5a59b3c39487acb754ff51282adb6b13b5db669e39cb12e
-size 220504
diff --git a/build/torch29-metal-aarch64-darwin/_ops.py b/build/torch29-metal-aarch64-darwin/_ops.py
deleted file mode 100644
index 602229319b5ec8bd38c2cd107da58e1e9e968b8d..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/_ops.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_63b875f
-ops = torch.ops._activation_63b875f
-
-def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_activation_63b875f::{op_name}"
\ No newline at end of file
diff --git a/build/torch29-metal-aarch64-darwin/activation/__init__.py b/build/torch29-metal-aarch64-darwin/activation/__init__.py
deleted file mode 100644
index 03dbc1afe1cf156661a2b1b22003cd5f599a0309..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/activation/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
- # We cannot use the module name as-is, after adding it to `sys.modules`,
- # it would also be used for other imports. So, we make a module name that
- # depends on the path for it to be unique using the hex-encoded hash of
- # the path.
- path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
- module_name = path_hash
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
- module = importlib.util.module_from_spec(spec)
- if module is None:
- raise ImportError(f"Cannot load module {module_name} from spec")
- sys.modules[module_name] = module
- spec.loader.exec_module(module) # type: ignore
- return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
diff --git a/build/torch29-metal-aarch64-darwin/layers.py b/build/torch29-metal-aarch64-darwin/layers.py
deleted file mode 100644
index 2f66f39d58561e0ff9d43eb943fac9e92e6a8259..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/layers.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> silu(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.silu_and_mul(out, x)
- return out
-
-class Silu(nn.Module):
- """An activation function for SiLU.
-
- The function computes x -> silu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.silu(out, x)
- return out
-
-class Gelu(nn.Module):
- """An activation function for GELU.
-
- The function computes x -> gelu(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu(out, x)
- return out
-
-class GeluTanh(nn.Module):
- """An activation function for GELU with `tanh` approximation.
-
- The function computes x -> gelu_tanh(x).
-
- Shapes:
- x: (num_tokens, d) or (batch_size, seq_len, d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_tanh(out, x)
- return out
-
-
-class MulAndSilu(nn.Module):
- """An activation function for SwiGLU.
-
- The function computes x -> x[:d] * silu(x[d:]) where d = x.shape[-1] // 2.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.mul_and_silu(out, x)
- return out
-
-
-class GeluAndMul(nn.Module):
- """An activation function for GeGLU.
-
- The function computes x -> GELU(x[:d]) * x[d:] where d = x.shape[-1] // 2.
-
- Shapes:
- x: (batch_size, seq_len, 2 * d) or (num_tokens, 2 * d)
- return: (batch_size, seq_len, d) or (num_tokens, d)
- """
-
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_and_mul(out, x)
- return out
-
-
-class GeluTanhAndMul(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.gelu_tanh_and_mul(out, x)
- return out
-
-
-class FatreluAndMul(nn.Module):
- """An activation function for FATReLU.
-
- The function computes x -> FATReLU(x[:d]) * x[d:] where
- d = x.shape[-1] // 2.
- This is used in openbmb/MiniCPM-S-1B-sft.
-
- Shapes:
- x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d)
- return: (num_tokens, d) or (batch_size, seq_len, d)
- """
-
- can_torch_compile: bool = True
-
- def __init__(self, threshold: float = 0.0):
- super().__init__()
- self.threshold = threshold
-
- def forward(self, x: torch.Tensor):
- if not x.is_contiguous():
- x = x.contiguous()
- d = x.shape[-1] // 2
- output_shape = x.shape[:-1] + (d,)
- out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
- ops.fatrelu_and_mul(out, x, self.threshold)
- return out
-
-
-class FastGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_fast(out, x)
- return out
-
-
-class NewGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_new(out, x)
- return out
-
-
-class QuickGELU(nn.Module):
- can_torch_compile: bool = True
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if not x.is_contiguous():
- x = x.contiguous()
- out = torch.empty_like(x)
- ops.gelu_quick(out, x)
- return out
diff --git a/build/torch29-metal-aarch64-darwin/metadata.json b/build/torch29-metal-aarch64-darwin/metadata.json
deleted file mode 100644
index 9cf5deed9898dce769f4cc73913d3530b92a0bd8..0000000000000000000000000000000000000000
--- a/build/torch29-metal-aarch64-darwin/metadata.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
- "version": 1,
- "python-depends": []
-}
\ No newline at end of file
diff --git a/flake.nix b/flake.nix
new file mode 100644
index 0000000000000000000000000000000000000000..54ac44c0698d43fb86a123430f5e9d2e9bcda8ea
--- /dev/null
+++ b/flake.nix
@@ -0,0 +1,17 @@
+{
+ description = "Flake for activation kernels";
+
+ inputs = {
+ kernel-builder.url = "github:huggingface/kernel-builder";
+ };
+
+ outputs =
+ {
+ self,
+ kernel-builder,
+ }:
+ kernel-builder.lib.genFlakeOutputs {
+ path = ./.;
+ rev = self.shortRev or self.dirtyShortRev or self.lastModifiedDate;
+ };
+}
diff --git a/media/benches.gif b/media/benches.gif
deleted file mode 100644
index 10667fe862b382eb21f5e81757be71a0d42f4b59..0000000000000000000000000000000000000000
--- a/media/benches.gif
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:47790b5e6098edf91310ec8de8f28f4781be3a2e954242fc5aa1d8f5560d217d
-size 2839393
diff --git a/media/benches.mp4 b/media/benches.mp4
deleted file mode 100644
index 7c6e8a999069242522d6cd5d98936dbe42f7077e..0000000000000000000000000000000000000000
--- a/media/benches.mp4
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8431422e8080a993672ca1440a1be6e9cde1737caec96a1a39dbc4df9fa41d29
-size 209572
diff --git a/media/benches_dark_animation.svg b/media/benches_dark_animation.svg
deleted file mode 100644
index 9f5d85ce8e4bda25a90d8a366c1cd18a7151269d..0000000000000000000000000000000000000000
--- a/media/benches_dark_animation.svg
+++ /dev/null
@@ -1,42 +0,0 @@
-
\ No newline at end of file
diff --git a/media/benches_dark_latency.svg b/media/benches_dark_latency.svg
deleted file mode 100644
index 54b01c10c89fade7e75cdec06c390c86b043b50a..0000000000000000000000000000000000000000
--- a/media/benches_dark_latency.svg
+++ /dev/null
@@ -1,2011 +0,0 @@
-
-
-
diff --git a/media/benches_dark_throughput.svg b/media/benches_dark_throughput.svg
deleted file mode 100644
index 46ca3b5f9d4af3646774948357e1897025bbdcf3..0000000000000000000000000000000000000000
--- a/media/benches_dark_throughput.svg
+++ /dev/null
@@ -1,2254 +0,0 @@
-
-
-
diff --git a/media/benches_latency.png b/media/benches_latency.png
deleted file mode 100644
index 23c0190661b9727df8205f133a0d39494becf109..0000000000000000000000000000000000000000
Binary files a/media/benches_latency.png and /dev/null differ
diff --git a/media/benches_light_animation.svg b/media/benches_light_animation.svg
deleted file mode 100644
index ebb9aa177a36416141a3f9a39b162ed8f5dd3bef..0000000000000000000000000000000000000000
--- a/media/benches_light_animation.svg
+++ /dev/null
@@ -1,42 +0,0 @@
-
\ No newline at end of file
diff --git a/media/benches_light_latency.svg b/media/benches_light_latency.svg
deleted file mode 100644
index 5aff08793189b1741313da91846bf1c468457bf6..0000000000000000000000000000000000000000
--- a/media/benches_light_latency.svg
+++ /dev/null
@@ -1,2011 +0,0 @@
-
-
-
diff --git a/media/benches_light_throughput.svg b/media/benches_light_throughput.svg
deleted file mode 100644
index 363d8b120646ed613e20aef3a377f53fa86d0ebc..0000000000000000000000000000000000000000
--- a/media/benches_light_throughput.svg
+++ /dev/null
@@ -1,2254 +0,0 @@
-
-
-
diff --git a/media/benches_throughput.png b/media/benches_throughput.png
deleted file mode 100644
index 5b2a4a7ca27e84fe600a6552dbecf234f3a9d0ee..0000000000000000000000000000000000000000
Binary files a/media/benches_throughput.png and /dev/null differ
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/kernels/__init__.py b/tests/kernels/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/kernels/allclose_default.py b/tests/kernels/allclose_default.py
new file mode 100644
index 0000000000000000000000000000000000000000..80eb1eeb9fb738d70efe28d64df98b2ff7223463
--- /dev/null
+++ b/tests/kernels/allclose_default.py
@@ -0,0 +1,14 @@
+import torch
+
+# Reference default values of atol and rtol are from
+# https://github.com/pytorch/pytorch/blob/6d96beb6bec24d73ee3f080bac54d2104068f675/test/test_transformers.py#L67
# Per-dtype comparison tolerances; fp32 uses the tight PyTorch defaults,
# while fp16/bf16 are relaxed to match the reference values linked above.
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float: 1.3e-6}


def get_default_atol(output) -> float:
    """Absolute tolerance appropriate for `output`'s dtype."""
    return default_atol[output.dtype]


def get_default_rtol(output) -> float:
    """Relative tolerance appropriate for `output`'s dtype."""
    return default_rtol[output.dtype]
diff --git a/tests/kernels/test_activation.py b/tests/kernels/test_activation.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d6aa773c9abcb5d3c0d61646e465aae9951966d
--- /dev/null
+++ b/tests/kernels/test_activation.py
@@ -0,0 +1,165 @@
+import math
+import random
+from typing import Type
+
+import activation
+import pytest
+import torch
+import torch.nn.functional as F
+
+from .utils import opcheck
+from .allclose_default import get_default_atol, get_default_rtol
+
# Parametrization grids for the kernel tests below.
DTYPES = [torch.half, torch.bfloat16, torch.float]
NUM_TOKENS = [7, 83, 2048]  # Arbitrary values for testing
D = [512, 13824]  # Arbitrary values for testing
SEEDS = [0]
# One device when a single GPU is visible, otherwise the first two.
# NOTE(review): with zero visible GPUs this still yields ["cuda:0", "cuda:1"];
# the tests assume a CUDA machine — confirm.
CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)]
+
+
def gelu_fast(x: torch.Tensor) -> torch.Tensor:
    """Reference fast-GELU: 0.5*x*(1 + tanh(0.7978845608*x*(1 + 0.044715*x^2)))."""
    inner = x * 0.7978845608 * (1.0 + 0.044715 * x * x)
    return 0.5 * x * (1.0 + torch.tanh(inner))
+
+
def gelu_new(x: torch.Tensor) -> torch.Tensor:
    """Reference "new" GELU (GPT-2 style tanh approximation)."""
    coeff = math.sqrt(2.0 / math.pi)
    cubic = x + 0.044715 * torch.pow(x, 3.0)
    return 0.5 * x * (1.0 + torch.tanh(coeff * cubic))
+
+
def gelu_quick(x: torch.Tensor) -> torch.Tensor:
    """Reference QuickGELU: x * sigmoid(1.702 * x)."""
    return torch.sigmoid(1.702 * x) * x
+
+
def fatrelu_and_mul(x: torch.Tensor, threshold: float) -> torch.Tensor:
    """Reference FATReLU gate: zero first-half entries <= threshold, then
    multiply elementwise by the second half of the last dimension."""
    half = x.shape[-1] // 2
    gate = F.threshold(x[..., :half], threshold, 0.0)
    return gate * x[..., half:]
+
+
def silu_and_mul(x: torch.Tensor) -> torch.Tensor:
    """Reference SwiGLU gate: silu(first half) * second half of the last dim."""
    half = x.shape[-1] // 2
    gate = x[..., :half]
    up = x[..., half:]
    return F.silu(gate) * up
+
+
def gelu_and_mul(x: torch.Tensor, approximate: str) -> torch.Tensor:
    """Reference GeGLU gate: gelu(first half) * second half of the last dim,
    using the requested GELU approximation ("none" or "tanh")."""
    half = x.shape[-1] // 2
    gate = F.gelu(x[..., :half], approximate=approximate)
    return gate * x[..., half:]
+
+
@pytest.mark.parametrize("activation_name", ["silu", "gelu", "gelu_tanh", "fatrelu"])
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_act_and_mul(
    activation_name: str,
    num_tokens: int,
    d: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    """Exercise the fused activation-and-mul CUDA ops end to end.

    For each op, three entry points are compared against a pure-PyTorch
    reference on the same random [num_tokens, 2*d] input: the functional
    wrapper (`fn`), the nn.Module layer, and the raw registered op via
    `opcheck`. The comparison is exact (atol=rtol=0) — see the comment below.
    """
    random.seed(seed)
    torch.manual_seed(seed)
    torch.set_default_device(device)
    # Input packs both halves along the last dim: [num_tokens, 2*d] -> [num_tokens, d].
    x = torch.randn(num_tokens, 2 * d, dtype=dtype)
    if activation_name == "silu":
        torch_fn = silu_and_mul
        fn = activation.silu_and_mul
        op = activation.ops.silu_and_mul
        layer = activation.layers.SiluAndMul()
    elif activation_name == "gelu":
        torch_fn = lambda x: gelu_and_mul(x, "none")
        fn = activation.gelu_and_mul
        op = activation.ops.gelu_and_mul
        layer = activation.layers.GeluAndMul()
    elif activation_name == "gelu_tanh":
        torch_fn = lambda x: gelu_and_mul(x, "tanh")
        fn = activation.gelu_tanh_and_mul
        op = activation.ops.gelu_tanh_and_mul
        layer = activation.layers.GeluTanhAndMul()
    elif activation_name == "fatrelu":
        # Threshold is drawn after random.seed(seed) above, so it is
        # deterministic per test parametrization.
        threshold = random.uniform(0, 1)
        torch_fn = lambda x: fatrelu_and_mul(x, threshold)
        fn = lambda out, x: activation.fatrelu_and_mul(out, x, threshold)
        op = activation.ops.fatrelu_and_mul
        layer = activation.layers.FatreluAndMul(threshold)

    # The wrapper writes into the preallocated `out` and also returns it.
    out_shape = x.shape[:-1] + (x.shape[-1] // 2,)
    out = torch.empty(out_shape, dtype=x.dtype, device=x.device)
    out = fn(out, x)
    mod_out = layer(x)
    ref_out = torch_fn(x)

    # The SiLU, GELU and FatReLU implementations are equivalent to the native
    # PyTorch implementations, so we can do exact comparison.
    torch.testing.assert_close(out, ref_out, atol=0.0, rtol=0.0)
    torch.testing.assert_close(mod_out, ref_out, atol=0.0, rtol=0.0)

    # NOTE: rebinds the `d` parameter; the value is identical since x has
    # last dim 2*d.
    d = x.shape[-1] // 2
    output_shape = x.shape[:-1] + (d,)
    out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
    if activation_name == "fatrelu":
        opcheck(op, (out, x, threshold))
    else:
        opcheck(op, (out, x))
+
+
# Each tuple is (reference_fn, functional_wrapper, raw_op, layer_class).
@pytest.mark.parametrize(
    "activation_fns",
    [
        (
            gelu_fast,
            activation.gelu_fast,
            activation.ops.gelu_fast,
            activation.layers.FastGELU,
        ),
        (
            gelu_new,
            activation.gelu_new,
            activation.ops.gelu_new,
            activation.layers.NewGELU,
        ),
        (
            gelu_quick,
            activation.gelu_quick,
            activation.ops.gelu_quick,
            activation.layers.QuickGELU,
        ),
    ],
)
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("d", D)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_activation(
    activation_fns,
    num_tokens: int,
    d: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    """Check the element-wise GELU-variant CUDA kernels.

    Compares the functional wrapper and the nn.Module layer against the
    pure-PyTorch reference within dtype-dependent tolerances, then runs
    `opcheck` on the raw registered op.
    """
    torch.manual_seed(seed)
    torch.set_default_device(device)
    x = torch.randn(num_tokens, d, dtype=dtype)
    torch_fn, fn, op, cls = activation_fns
    layer = cls()
    # These ops are element-wise, so the output shape matches the input.
    out = fn(torch.empty_like(x), x)
    layer_out = layer(x)
    ref_out = torch_fn(x)
    torch.testing.assert_close(
        out, ref_out, atol=get_default_atol(out), rtol=get_default_rtol(out)
    )
    torch.testing.assert_close(
        out, layer_out, atol=get_default_atol(out), rtol=get_default_rtol(out)
    )

    out = torch.empty_like(x)
    opcheck(op, (out, x))
diff --git a/tests/kernels/utils.py b/tests/kernels/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..d24c5babfb9690031844f262212d80274fd478c6
--- /dev/null
+++ b/tests/kernels/utils.py
@@ -0,0 +1,73 @@
+"""Kernel test utils"""
+
+import itertools
+import random
+import unittest
+from numbers import Number
+from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
+
+import pytest
+import torch
+from torch._prims_common import TensorLikeType
+
# For now, disable "test_aot_dispatch_dynamic" since there are some
# bugs related to this test in PyTorch 2.4.
# Restricted suite: schema, autograd-registration and fake-tensor checks only.
DEFAULT_OPCHECK_TEST_UTILS: Tuple[str, ...] = (
    "test_schema",
    "test_autograd_registration",
    "test_faketensor",
)

# Full suite, including the dynamic AOT-dispatch check.
ALL_OPCHECK_TEST_UTILS: Tuple[str, ...] = (
    "test_schema",
    "test_autograd_registration",
    "test_faketensor",
    "test_aot_dispatch_dynamic",
)
+
+
# Copied/modified from torch._refs.__init__.py
def fp8_allclose(
    a: TensorLikeType,
    b: TensorLikeType,
    rtol: float = 1e-05,
    atol: float = 1e-08,
    equal_nan: bool = False,
) -> bool:
    """torch.allclose reference implementation that upcasts both operands to
    float64 before comparing, so it also works for fp8 inputs."""
    torch._refs._check_close_args(name="torch.allclose", a=a, b=b, rtol=rtol, atol=atol)

    close = torch.isclose(
        a.double(), b.double(), rtol=rtol, atol=atol, equal_nan=equal_nan
    )
    return bool(close.all().item())
+
+
# A special version of op check that has a restricted default set of test_utils
# and a patched version of allclose that supports fp8 types.
def opcheck(
    op: Union[
        torch._ops.OpOverload,
        torch._ops.OpOverloadPacket,
        torch._library.custom_ops.CustomOpDef,
    ],
    args: Tuple[Any, ...],
    kwargs: Optional[Dict[str, Any]] = None,
    *,
    test_utils: Union[str, Sequence[str]] = ALL_OPCHECK_TEST_UTILS,
    raise_exception: bool = True,
    cond: bool = True,
) -> Dict[str, str]:
    """Run torch.library.opcheck with torch.allclose patched to fp8_allclose.

    Args:
        op: the op (overload, packet, or custom-op def) to check.
        args / kwargs: concrete sample inputs forwarded to the op.
        test_utils: which opcheck sub-tests to run.
        raise_exception: forwarded to torch.library.opcheck.
        cond: when False, skip the check entirely.

    Returns:
        The mapping of sub-test name to status from torch.library.opcheck,
        or {} when `cond` is False.
    """
    # Skip early so a disabled check does not patch torch.allclose at all.
    if not cond:
        return {}
    # Import the submodule explicitly: `unittest.mock` is only an attribute of
    # `unittest` once the submodule has been imported somewhere, and the
    # module-level `import unittest` alone does not guarantee that.
    from unittest import mock

    with mock.patch("torch.allclose", new=fp8_allclose):
        return torch.library.opcheck(
            op, args, kwargs, test_utils=test_utils, raise_exception=raise_exception
        )
diff --git a/torch-ext/activation/__init__.py b/torch-ext/activation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddb37490dad9d8ffcbeb13ed06b33f03fef8ed78
--- /dev/null
+++ b/torch-ext/activation/__init__.py
@@ -0,0 +1,52 @@
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """Fused SwiGLU gate: writes silu(x[..., :d]) * x[..., d:] into `out`.

    Args:
        out: preallocated output tensor of shape [..., d], written in place.
        x: input tensor of shape [..., 2*d].

    Returns:
        `out`, for call-chaining. (Annotation fixed: the function returns
        `out`, it is not `-> None`.)
    """
    ops.silu_and_mul(out, x)
    return out
+
+
def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """Fused GeGLU gate ("none" GELU approximation): writes
    gelu(x[..., :d]) * x[..., d:] into `out`.

    Args:
        out: preallocated output tensor of shape [..., d], written in place.
        x: input tensor of shape [..., 2*d].

    Returns:
        `out`, for call-chaining.
    """
    ops.gelu_and_mul(out, x)
    return out
+
+
def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """Fused GeGLU gate ("tanh" GELU approximation): writes
    gelu_tanh(x[..., :d]) * x[..., d:] into `out`.

    Args:
        out: preallocated output tensor of shape [..., d], written in place.
        x: input tensor of shape [..., 2*d].

    Returns:
        `out`, for call-chaining.
    """
    ops.gelu_tanh_and_mul(out, x)
    return out
+
+
def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> torch.Tensor:
    """Fused FATReLU gate: thresholds x[..., :d] at `threshold` (values <=
    threshold become 0) and multiplies by x[..., d:], writing into `out`.

    Args:
        out: preallocated output tensor of shape [..., d], written in place.
        x: input tensor of shape [..., 2*d].
        threshold: FATReLU cut-off; 0.0 reduces to plain ReLU gating.

    Returns:
        `out`, for call-chaining.
    """
    ops.fatrelu_and_mul(out, x, threshold)
    return out
+
+
def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """Element-wise fast-GELU approximation; writes the result into `out`.

    Args:
        out: preallocated output tensor, same shape as `x`, written in place.
        x: input tensor.

    Returns:
        `out`, for call-chaining.
    """
    ops.gelu_fast(out, x)
    return out
+
+
def gelu_new(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """Element-wise "new" GELU (GPT-2 variant); writes the result into `out`.

    Args:
        out: preallocated output tensor, same shape as `x`, written in place.
        x: input tensor.

    Returns:
        `out`, for call-chaining.
    """
    ops.gelu_new(out, x)
    return out
+
+
def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
    """Element-wise QuickGELU (x * sigmoid(1.702x)); writes into `out`.

    Args:
        out: preallocated output tensor, same shape as `x`, written in place.
        x: input tensor.

    Returns:
        `out`, for call-chaining.
    """
    ops.gelu_quick(out, x)
    return out
+
+
# Public API: the functional wrappers plus the `layers` submodule of
# nn.Module front-ends. (`ops` is importable but intentionally not listed.)
__all__ = [
    "silu_and_mul",
    "gelu_and_mul",
    "gelu_tanh_and_mul",
    "fatrelu_and_mul",
    "gelu_fast",
    "gelu_new",
    "gelu_quick",
    "layers",
]
diff --git a/torch-ext/activation/layers.py b/torch-ext/activation/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c129e3b1c9ed4c18166d5b5d67eb08f137a27f
--- /dev/null
+++ b/torch-ext/activation/layers.py
@@ -0,0 +1,65 @@
+import torch
+import torch.nn as nn
+
+from ._ops import ops
+
+
class SiluAndMul(nn.Module):
    """SwiGLU front-end: splits the last dim in half and returns
    silu(first_half) * second_half via the fused CUDA kernel."""

    def forward(self, x: torch.Tensor):
        half = x.shape[-1] // 2
        result = x.new_empty(x.shape[:-1] + (half,))
        ops.silu_and_mul(result, x)
        return result
+
+
class GeluAndMul(nn.Module):
    """GeGLU front-end ("none" GELU approximation): splits the last dim in
    half and returns gelu(first_half) * second_half via the CUDA kernel."""

    def forward(self, x: torch.Tensor):
        half = x.shape[-1] // 2
        result = x.new_empty(x.shape[:-1] + (half,))
        ops.gelu_and_mul(result, x)
        return result
+
+
class GeluTanhAndMul(nn.Module):
    """GeGLU front-end ("tanh" GELU approximation): splits the last dim in
    half and returns gelu_tanh(first_half) * second_half via the CUDA kernel."""

    def forward(self, x: torch.Tensor):
        half = x.shape[-1] // 2
        result = x.new_empty(x.shape[:-1] + (half,))
        ops.gelu_tanh_and_mul(result, x)
        return result
+
+
class FatreluAndMul(nn.Module):
    """FATReLU front-end: thresholds the first half of the last dim and
    multiplies by the second half via the CUDA kernel.

    Args:
        threshold: FATReLU cut-off; 0.0 reduces to plain ReLU gating.
    """

    def __init__(self, threshold: float = 0.0):
        super().__init__()
        self.threshold = threshold

    def forward(self, x: torch.Tensor):
        half = x.shape[-1] // 2
        result = x.new_empty(x.shape[:-1] + (half,))
        ops.fatrelu_and_mul(result, x, self.threshold)
        return result
+
+
class FastGELU(nn.Module):
    """Element-wise fast-GELU approximation backed by the CUDA kernel."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = torch.empty_like(x)
        ops.gelu_fast(result, x)
        return result
+
+
class NewGELU(nn.Module):
    """Element-wise "new" GELU (GPT-2 variant) backed by the CUDA kernel."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = torch.empty_like(x)
        ops.gelu_new(result, x)
        return result
+
+
class QuickGELU(nn.Module):
    """Element-wise QuickGELU (x * sigmoid(1.702x)) backed by the CUDA kernel."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = torch.empty_like(x)
        ops.gelu_quick(result, x)
        return result
diff --git a/torch-ext/torch_binding.cpp b/torch-ext/torch_binding.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b6148ecc33137085656b962c0cdc10fd480dc787
--- /dev/null
+++ b/torch-ext/torch_binding.cpp
@@ -0,0 +1,37 @@
#include <torch/library.h>
+
+#include "registration.h"
+#include "torch_binding.h"
+
// Register the activation kernels with the PyTorch dispatcher under
// TORCH_EXTENSION_NAME. Every schema takes a mutable, preallocated `out`
// tensor (marked `Tensor!`) and a read-only `input`, returns nothing, and
// writes its result into `out`; all implementations are CUDA-only.
TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
  // Activation ops
  // Activation function used in SwiGLU.
  ops.def("silu_and_mul(Tensor! out, Tensor input) -> ()");
  ops.impl("silu_and_mul", torch::kCUDA, &silu_and_mul);

  // Activation function used in GeGLU with `none` approximation.
  ops.def("gelu_and_mul(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_and_mul", torch::kCUDA, &gelu_and_mul);

  // Activation function used in GeGLU with `tanh` approximation.
  ops.def("gelu_tanh_and_mul(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_tanh_and_mul", torch::kCUDA, &gelu_tanh_and_mul);

  // FATReLU implementation. The extra `threshold` scalar is the cut-off
  // below which first-half activations are zeroed.
  ops.def("fatrelu_and_mul(Tensor! out, Tensor input, float threshold) -> ()");
  ops.impl("fatrelu_and_mul", torch::kCUDA, &fatrelu_and_mul);

  // GELU implementation used in GPT-2 (the `gelu_new` tanh variant).
  ops.def("gelu_new(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_new", torch::kCUDA, &gelu_new);

  // Approximate (fast) GELU implementation.
  ops.def("gelu_fast(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_fast", torch::kCUDA, &gelu_fast);

  // Quick GELU implementation: x * sigmoid(1.702 * x).
  ops.def("gelu_quick(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_quick", torch::kCUDA, &gelu_quick);
}

REGISTER_EXTENSION(TORCH_EXTENSION_NAME)
diff --git a/torch-ext/torch_binding.h b/torch-ext/torch_binding.h
new file mode 100644
index 0000000000000000000000000000000000000000..cb163cfc1da061377d077bab6d12a8b048d60fa5
--- /dev/null
+++ b/torch-ext/torch_binding.h
@@ -0,0 +1,18 @@
+#pragma once
+
#include <torch/torch.h>
+
// CUDA activation entry points (defined in activation/activation_kernels.cu).
// Each writes its result into the preallocated `out` tensor.

// SwiGLU gate: out = silu(first half of input's last dim) * second half.
void silu_and_mul(torch::Tensor &out, torch::Tensor &input);

// GeGLU gate with the `none` GELU approximation.
void gelu_and_mul(torch::Tensor &out, torch::Tensor &input);

// GeGLU gate with the `tanh` GELU approximation.
void gelu_tanh_and_mul(torch::Tensor &out, torch::Tensor &input);

// FATReLU gate: first-half values <= `threshold` are zeroed before the mul.
void fatrelu_and_mul(torch::Tensor &out, torch::Tensor &input,
                     double threshold);

// Element-wise "new" GELU (GPT-2 variant).
void gelu_new(torch::Tensor &out, torch::Tensor &input);

// Element-wise fast (approximate) GELU.
void gelu_fast(torch::Tensor &out, torch::Tensor &input);

// Element-wise QuickGELU: x * sigmoid(1.702 * x).
void gelu_quick(torch::Tensor &out, torch::Tensor &input);