```python
class Module:
    def __call__(self, *input, **kwargs):
        # Do some work before calling forward:
        # run the registered forward pre-hooks. A pre-hook may
        # replace the input by returning a new value.
        for hook in self._forward_pre_hooks.values():
            result = hook(self, input)
            if result is not None:
                if not isinstance(result, tuple):
                    result = (result,)
                input = result

        # Call the forward method itself.
        result = self.forward(*input, **kwargs)

        # Do some work after calling forward:
        # run the registered forward hooks. A hook may replace
        # the output by returning a new value.
        for hook in self._forward_hooks.values():
            hook_result = hook(self, input, result)
            if hook_result is not None:
                result = hook_result

        return result
```
```cpp
__global__ void device_copy_vector4_kernel(int* d_in, int* d_out, int N) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;

  // Copy four ints at a time via a vectorized int4 load/store.
  for (int i = idx; i < N/4; i += blockDim.x * gridDim.x) {
    reinterpret_cast<int4*>(d_out)[i] = reinterpret_cast<int4*>(d_in)[i];
  }

  // In only one thread, process the final elements (if there are any).
  int remainder = N % 4;
  if (idx == N/4 && remainder != 0) {
    while (remainder) {
      int idx = N - remainder--;
      d_out[idx] = d_in[idx];
    }
  }
}
```
```cpp
// Host-side launcher; assumes MAX_BLOCKS is defined elsewhere.
void device_copy_vector4(int* d_in, int* d_out, int N) {
  int threads = 128;
  int blocks = min((N/4 + threads - 1) / threads, MAX_BLOCKS);

  device_copy_vector4_kernel<<<blocks, threads>>>(d_in, d_out, N);
}
```
```python
import torch
from torch.nn import Sequential as Seq, Linear, ReLU
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import remove_self_loops, add_self_loops

class SAGEConv(MessagePassing):
    def __init__(self, in_channels, out_channels):
        super(SAGEConv, self).__init__(aggr='max')  # "Max" aggregation.
        self.lin = torch.nn.Linear(in_channels, out_channels)
        self.act = torch.nn.ReLU()
        self.update_lin = torch.nn.Linear(in_channels + out_channels, in_channels, bias=False)
        self.update_act = torch.nn.ReLU()

    def forward(self, x, edge_index):
        # x has shape [N, in_channels]
        # edge_index has shape [2, E]

        # Removes every self-loop in the graph given by edge_index, so that (i,i) ∉ E for every i ∈ V.
        edge_index, _ = remove_self_loops(edge_index)
        # Adds a self-loop (i,i) ∈ E to every node i ∈ V in the graph given by edge_index.
        edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))

        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)

    def message(self, x_j):
        # x_j has shape [E, in_channels]
        x_j = self.lin(x_j)
        x_j = self.act(x_j)
        return x_j

    def update(self, aggr_out, x):
        # aggr_out has shape [N, out_channels]
        new_embedding = torch.cat([aggr_out, x], dim=1)
        new_embedding = self.update_lin(new_embedding)
        new_embedding = self.update_act(new_embedding)
        return new_embedding
```
Installation is extremely simple: everything lands in your home directory (under ~/.cargo/bin by default), and the installer updates your bashrc/zshrc automatically. On Linux and macOS systems, this is done as follows:
```sh
curl https://sh.rustup.rs -sSf | sh
```
Basic Syntax
printf
```rust
struct ClassName; // Added so the snippet compiles on its own.

impl ClassName {
    pub fn printFunc() {
        let a = 12;
        // println! is not a function but a macro rule, hence the exclamation mark.
        println!("a is {0}, a again is {0}", a);
    }
}
```
Variables
Rust is a strongly typed language, but it can infer a variable's type automatically.
```rust
// The type can be specified explicitly.
let a: u64 = 123;

// Immutable binding.
let a = 123;
let a = 456; // Not a copy: this re-binds (shadows) `a`.

let s2 = s1.clone(); // This is a real (deep) copy (assuming s1 is, e.g., a String).

// Mutable variable.
let mut a = 123;
a = 456;

// Constant (constants are conventionally named in UPPER_CASE).
const A: i32 = 123;
```
Functions
Function return values
Rust declares a function's return type after the parameter list with `->` (not with `:`). If the function body ends in an expression with no trailing semicolon and no explicit `return`, that final expression becomes the return value.
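A minimal sketch illustrating both points (the function names here are just for illustration):

```rust
// The return type is declared with `->` after the parameter list.
fn add(a: i32, b: i32) -> i32 {
    a + b // No trailing semicolon: this expression is the return value.
}

fn add_explicit(a: i32, b: i32) -> i32 {
    return a + b; // An explicit `return` works as well.
}

fn main() {
    println!("{}", add(1, 2));          // 3
    println!("{}", add_explicit(3, 4)); // 7
}
```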
So how does Rust achieve memory safety? The classic classes of memory errors it is designed to rule out are listed below, followed by a sketch of the compiler catching two of them.
Memory safety
- buffer overflow
- null pointer dereference
- use after free
- use of uninitialized memory
- illegal free (of an already-freed pointer, or a non-malloced pointer)
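As a small illustration, the sketch below shows how ownership and the borrow checker reject two of these error classes at compile time (the offending lines are commented out so the rest compiles):

```rust
fn main() {
    // Use-after-free / double-free are prevented by move semantics:
    let s1 = String::from("hello");
    let s2 = s1;            // Ownership of the heap buffer moves to `s2`.
    // println!("{}", s1); // Compile error: borrow of moved value `s1`.

    // Dangling references are prevented by lifetimes / the borrow checker:
    // let r;
    // {
    //     let x = 5;
    //     r = &x;          // Compile error: `x` does not live long enough.
    // }
    // println!("{}", r);

    println!("{}", s2);
}
```

Null pointer dereferences are ruled out at the type level as well: safe Rust has no null, and the absence of a value is expressed with `Option<T>` instead.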