fresh zerver commit
This commit is contained in:
commit
a03e0259da
8
.gitignore
vendored
Normal file
8
.gitignore
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
zig-cache/
|
||||
zig-out/
|
||||
/release/
|
||||
/debug/
|
||||
/build/
|
||||
/build-*/
|
||||
/docgen_tmp/
|
||||
*.db
|
7
.ignore
Normal file
7
.ignore
Normal file
|
@ -0,0 +1,7 @@
|
|||
src/.deps
|
||||
zig-cache
|
||||
zig-out/
|
||||
/release/
|
||||
/debug/
|
||||
/build/
|
||||
/build-*/
|
16
README.md
Normal file
16
README.md
Normal file
|
@ -0,0 +1,16 @@
|
|||
# Dependencies
|
||||
|
||||
Run the following inside the folder `zerver/src/.deps`
|
||||
|
||||
```sh
|
||||
git clone git@github.com:rofrol/zig-postgres.git
|
||||
git clone git@github.com:karlseguin/http.zig.git
|
||||
git clone git@github.com:karlseguin/websocket.zig.git
|
||||
```
|
||||
|
||||
|
||||
To build and run the server while developing, run
|
||||
|
||||
```sh
|
||||
zig build run
|
||||
```
|
78
build.zig
Normal file
78
build.zig
Normal file
|
@ -0,0 +1,78 @@
|
|||
const std = @import("std");
|
||||
|
||||
// Although this function looks imperative, note that its job is to
|
||||
// declaratively construct a build graph that will be executed by an external
|
||||
// runner.
|
||||
pub fn build(b: *std.Build) void {
|
||||
// Standard target options allows the person running `zig build` to choose
|
||||
// what target to build for. Here we do not override the defaults, which
|
||||
// means any target is allowed, and the default is native. Other options
|
||||
// for restricting supported target set are available.
|
||||
const target = b.standardTargetOptions(.{});
|
||||
|
||||
// Standard optimization options allow the person running `zig build` to select
|
||||
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
|
||||
// set a preferred release mode, allowing the user to decide how to optimize.
|
||||
const optimize = b.standardOptimizeOption(.{});
|
||||
|
||||
const sqlite = b.addStaticLibrary(.{ .name = "sqlite", .target = target, .optimize = .ReleaseSafe });
|
||||
sqlite.addCSourceFile("src/.deps/zig-sqlite/c/sqlite3.c", &[_][]const u8{"-std=c99"});
|
||||
sqlite.linkLibC();
|
||||
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "zerver",
|
||||
// In this case the main source file is merely a path, however, in more
|
||||
// complicated build scripts, this could be a generated file.
|
||||
.root_source_file = .{ .path = "src/main.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
|
||||
exe.linkLibrary(sqlite);
|
||||
exe.addModule("sqlite", b.createModule(.{ .source_file = .{ .path = "src/.deps/zig-sqlite/sqlite.zig", } }));
|
||||
exe.addIncludePath("src/.deps/zig-sqlite/c");
|
||||
|
||||
// This declares intent for the executable to be installed into the
|
||||
// standard location when the user invokes the "install" step (the default
|
||||
// step when running `zig build`).
|
||||
b.installArtifact(exe);
|
||||
|
||||
// This *creates* a Run step in the build graph, to be executed when another
|
||||
// step is evaluated that depends on it. The next line below will establish
|
||||
// such a dependency.
|
||||
const run_cmd = b.addRunArtifact(exe);
|
||||
|
||||
// By making the run step depend on the install step, it will be run from the
|
||||
// installation directory rather than directly from within the cache directory.
|
||||
// This is not necessary, however, if the application depends on other installed
|
||||
// files, this ensures they will be present and in the expected location.
|
||||
run_cmd.step.dependOn(b.getInstallStep());
|
||||
|
||||
// This allows the user to pass arguments to the application in the build
|
||||
// command itself, like this: `zig build run -- arg1 arg2 etc`
|
||||
if (b.args) |args| {
|
||||
run_cmd.addArgs(args);
|
||||
}
|
||||
|
||||
// This creates a build step. It will be visible in the `zig build --help` menu,
|
||||
// and can be selected like this: `zig build run`
|
||||
// This will evaluate the `run` step rather than the default, which is "install".
|
||||
const run_step = b.step("run", "Run the app");
|
||||
run_step.dependOn(&run_cmd.step);
|
||||
|
||||
// Creates a step for unit testing. This only builds the test executable
|
||||
// but does not run it.
|
||||
const unit_tests = b.addTest(.{
|
||||
.root_source_file = .{ .path = "src/main.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
|
||||
const run_unit_tests = b.addRunArtifact(unit_tests);
|
||||
|
||||
// Similar to creating the run step earlier, this exposes a `test` step to
|
||||
// the `zig build --help` menu, providing a way for the user to request
|
||||
// running the unit tests.
|
||||
const test_step = b.step("test", "Run unit tests");
|
||||
test_step.dependOn(&run_unit_tests.step);
|
||||
}
|
11
docker/docker-compose.yml
Normal file
11
docker/docker-compose.yml
Normal file
|
@ -0,0 +1,11 @@
|
|||
version: "3"
|
||||
|
||||
services:
|
||||
gotify:
|
||||
image: gotify/server
|
||||
ports:
|
||||
- 8082:80
|
||||
environment:
|
||||
- GOTIFY_DEFAULTUSER_PASS=password
|
||||
volumes:
|
||||
- "./gotify_data:/app/data"
|
617
src/.deps/extras.zig
Normal file
617
src/.deps/extras.zig
Normal file
|
@ -0,0 +1,617 @@
|
|||
|
||||
const std = @import("std");
|
||||
const string = []const u8;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
pub fn fmtByteCountIEC(alloc: std.mem.Allocator, b: u64) !string {
|
||||
return try reduceNumber(alloc, b, 1024, "B", "KMGTPEZYRQ");
|
||||
}
|
||||
|
||||
pub fn reduceNumber(alloc: std.mem.Allocator, input: u64, comptime unit: u64, comptime base: string, comptime prefixes: string) !string {
|
||||
if (input < unit) {
|
||||
return std.fmt.allocPrint(alloc, "{d} {s}", .{ input, base });
|
||||
}
|
||||
var div = unit;
|
||||
var exp: usize = 0;
|
||||
var n = input / unit;
|
||||
while (n >= unit) : (n /= unit) {
|
||||
div *= unit;
|
||||
exp += 1;
|
||||
}
|
||||
return try std.fmt.allocPrint(alloc, "{d:.3} {s}{s}", .{ @intToFloat(f64, input) / @intToFloat(f64, div), prefixes[exp .. exp + 1], base });
|
||||
}
|
||||
|
||||
pub fn addSentinel(alloc: std.mem.Allocator, comptime T: type, input: []const T, comptime sentinel: T) ![:sentinel]const T {
|
||||
var list = try std.ArrayList(T).initCapacity(alloc, input.len + 1);
|
||||
try list.appendSlice(input);
|
||||
try list.append(sentinel);
|
||||
const str = list.toOwnedSlice();
|
||||
return str[0 .. str.len - 1 :sentinel];
|
||||
}
|
||||
|
||||
const alphabet = "0123456789abcdefghijklmnopqrstuvwxyz";
|
||||
|
||||
pub fn randomSlice(alloc: std.mem.Allocator, rand: std.rand.Random, comptime T: type, len: usize) ![]T {
|
||||
var buf = try alloc.alloc(T, len);
|
||||
var i: usize = 0;
|
||||
while (i < len) : (i += 1) {
|
||||
buf[i] = alphabet[rand.int(u8) % alphabet.len];
|
||||
}
|
||||
return buf;
|
||||
}
|
||||
|
||||
pub fn trimPrefix(in: string, prefix: string) string {
|
||||
if (std.mem.startsWith(u8, in, prefix)) {
|
||||
return in[prefix.len..];
|
||||
}
|
||||
return in;
|
||||
}
|
||||
|
||||
pub fn trimPrefixEnsure(in: string, prefix: string) ?string {
|
||||
if (!std.mem.startsWith(u8, in, prefix)) return null;
|
||||
return in[prefix.len..];
|
||||
}
|
||||
|
||||
pub fn trimSuffix(in: string, suffix: string) string {
|
||||
if (std.mem.endsWith(u8, in, suffix)) {
|
||||
return in[0 .. in.len - suffix.len];
|
||||
}
|
||||
return in;
|
||||
}
|
||||
|
||||
pub fn trimSuffixEnsure(in: string, suffix: string) ?string {
|
||||
if (!std.mem.endsWith(u8, in, suffix)) return null;
|
||||
return in[0 .. in.len - suffix.len];
|
||||
}
|
||||
|
||||
pub fn base64EncodeAlloc(alloc: std.mem.Allocator, input: string) !string {
|
||||
const base64 = std.base64.standard.Encoder;
|
||||
var buf = try alloc.alloc(u8, base64.calcSize(input.len));
|
||||
return base64.encode(buf, input);
|
||||
}
|
||||
|
||||
pub fn base64DecodeAlloc(alloc: std.mem.Allocator, input: string) !string {
|
||||
const base64 = std.base64.standard.Decoder;
|
||||
var buf = try alloc.alloc(u8, try base64.calcSizeForSlice(input));
|
||||
try base64.decode(buf, input);
|
||||
return buf;
|
||||
}
|
||||
|
||||
pub fn asciiUpper(alloc: std.mem.Allocator, input: string) ![]u8 {
|
||||
var buf = try alloc.dupe(u8, input);
|
||||
for (0..buf.len) |i| {
|
||||
buf[i] = std.ascii.toUpper(buf[i]);
|
||||
}
|
||||
return buf;
|
||||
}
|
||||
|
||||
pub fn doesFolderExist(dir: ?std.fs.Dir, fpath: []const u8) !bool {
|
||||
const file = (dir orelse std.fs.cwd()).openFile(fpath, .{}) catch |e| switch (e) {
|
||||
error.FileNotFound => return false,
|
||||
error.IsDir => return true,
|
||||
else => return e,
|
||||
};
|
||||
defer file.close();
|
||||
const s = try file.stat();
|
||||
if (s.kind != .directory) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn doesFileExist(dir: ?std.fs.Dir, fpath: []const u8) !bool {
|
||||
const file = (dir orelse std.fs.cwd()).openFile(fpath, .{}) catch |e| switch (e) {
|
||||
error.FileNotFound => return false,
|
||||
error.IsDir => return true,
|
||||
else => return e,
|
||||
};
|
||||
defer file.close();
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn sliceToInt(comptime T: type, comptime E: type, slice: []const E) !T {
|
||||
const a = @typeInfo(T).Int.bits;
|
||||
const b = @typeInfo(E).Int.bits;
|
||||
if (a < b * slice.len) return error.Overflow;
|
||||
|
||||
var n: T = 0;
|
||||
for (slice, 0..) |item, i| {
|
||||
const shift = @intCast(std.math.Log2Int(T), b * (slice.len - 1 - i));
|
||||
n = n | (@as(T, item) << shift);
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
pub fn fileList(alloc: std.mem.Allocator, dir: std.fs.IterableDir) ![]string {
|
||||
var list = std.ArrayList(string).init(alloc);
|
||||
defer list.deinit();
|
||||
|
||||
var walk = try dir.walk(alloc);
|
||||
defer walk.deinit();
|
||||
while (try walk.next()) |entry| {
|
||||
if (entry.kind != .file) continue;
|
||||
try list.append(try alloc.dupe(u8, entry.path));
|
||||
}
|
||||
return list.toOwnedSlice();
|
||||
}
|
||||
|
||||
pub fn dirSize(alloc: std.mem.Allocator, dir: std.fs.IterableDir) !u64 {
|
||||
var res: u64 = 0;
|
||||
|
||||
var walk = try dir.walk(alloc);
|
||||
defer walk.deinit();
|
||||
while (try walk.next()) |entry| {
|
||||
if (entry.kind != .File) continue;
|
||||
res += try fileSize(dir.dir, entry.path);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn fileSize(dir: std.fs.Dir, sub_path: string) !u64 {
|
||||
const f = try dir.openFile(sub_path, .{});
|
||||
defer f.close();
|
||||
const s = try f.stat();
|
||||
return s.size;
|
||||
}
|
||||
|
||||
pub fn hashFile(dir: std.fs.Dir, sub_path: string, comptime Algo: type) ![Algo.digest_length * 2]u8 {
|
||||
const file = try dir.openFile(sub_path, .{});
|
||||
defer file.close();
|
||||
var h = Algo.init(.{});
|
||||
var out: [Algo.digest_length]u8 = undefined;
|
||||
try pipe(file.reader(), h.writer());
|
||||
h.final(&out);
|
||||
var res: [Algo.digest_length * 2]u8 = undefined;
|
||||
var fbs = std.io.fixedBufferStream(&res);
|
||||
try std.fmt.format(fbs.writer(), "{x}", .{std.fmt.fmtSliceHexLower(&out)});
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn pipe(reader_from: anytype, writer_to: anytype) !void {
|
||||
var buf: [std.mem.page_size]u8 = undefined;
|
||||
var fifo = std.fifo.LinearFifo(u8, .Slice).init(&buf);
|
||||
defer fifo.deinit();
|
||||
try fifo.pump(reader_from, writer_to);
|
||||
}
|
||||
|
||||
pub fn StringerJsonStringifyMixin(comptime S: type) type {
|
||||
return struct {
|
||||
pub fn jsonStringify(self: S, options: std.json.StringifyOptions, out_stream: anytype) !void {
|
||||
var buf: [1024]u8 = undefined;
|
||||
var fba = std.heap.FixedBufferAllocator.init(&buf);
|
||||
const alloc = fba.allocator();
|
||||
var list = std.ArrayList(u8).init(alloc);
|
||||
errdefer list.deinit();
|
||||
const writer = list.writer();
|
||||
try writer.writeAll(try self.toString(alloc));
|
||||
try std.json.stringify(list.toOwnedSlice(), options, out_stream);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn TagNameJsonStringifyMixin(comptime S: type) type {
|
||||
return struct {
|
||||
pub fn jsonStringify(self: S, options: std.json.StringifyOptions, out_stream: anytype) !void {
|
||||
try std.json.stringify(@tagName(self), options, out_stream);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn countScalar(comptime T: type, haystack: []const T, needle: T) usize {
|
||||
var found: usize = 0;
|
||||
|
||||
for (haystack) |item| {
|
||||
if (item == needle) {
|
||||
found += 1;
|
||||
}
|
||||
}
|
||||
return found;
|
||||
}
|
||||
|
||||
pub fn ptrCast(comptime T: type, ptr: *anyopaque) *T {
|
||||
if (@alignOf(T) == 0) @compileError(@typeName(T));
|
||||
return @ptrCast(*T, @alignCast(@alignOf(T), ptr));
|
||||
}
|
||||
|
||||
pub fn ptrCastConst(comptime T: type, ptr: *const anyopaque) *const T {
|
||||
if (@alignOf(T) == 0) @compileError(@typeName(T));
|
||||
return @ptrCast(*const T, @alignCast(@alignOf(T), ptr));
|
||||
}
|
||||
|
||||
pub fn sortBy(comptime T: type, items: []T, comptime field: std.meta.FieldEnum(T)) void {
|
||||
std.mem.sort(T, items, {}, struct {
|
||||
fn f(_: void, lhs: T, rhs: T) bool {
|
||||
return @field(lhs, @tagName(field)) < @field(rhs, @tagName(field));
|
||||
}
|
||||
}.f);
|
||||
}
|
||||
|
||||
pub fn sortBySlice(comptime T: type, items: []T, comptime field: std.meta.FieldEnum(T)) void {
|
||||
std.mem.sort(T, items, {}, struct {
|
||||
fn f(_: void, lhs: T, rhs: T) bool {
|
||||
return lessThanSlice(std.meta.FieldType(T, field))({}, @field(lhs, @tagName(field)), @field(rhs, @tagName(field)));
|
||||
}
|
||||
}.f);
|
||||
}
|
||||
|
||||
pub fn lessThanSlice(comptime T: type) fn (void, T, T) bool {
|
||||
return struct {
|
||||
fn f(_: void, lhs: T, rhs: T) bool {
|
||||
const result = for (0..@min(lhs.len, rhs.len)) |i| {
|
||||
if (lhs[i] < rhs[i]) break true;
|
||||
if (lhs[i] > rhs[i]) break false;
|
||||
} else false;
|
||||
return result;
|
||||
}
|
||||
}.f;
|
||||
}
|
||||
|
||||
pub fn containsString(haystack: []const string, needle: string) bool {
|
||||
for (haystack) |item| {
|
||||
if (std.mem.eql(u8, item, needle)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
pub fn FieldsTuple(comptime T: type) type {
|
||||
const fields = std.meta.fields(T);
|
||||
var types: [fields.len]type = undefined;
|
||||
for (fields, 0..) |item, i| {
|
||||
types[i] = item.type;
|
||||
}
|
||||
return std.meta.Tuple(&types);
|
||||
}
|
||||
|
||||
pub fn positionalInit(comptime T: type, args: FieldsTuple(T)) T {
|
||||
var t: T = undefined;
|
||||
inline for (std.meta.fields(T), 0..) |field, i| {
|
||||
@field(t, field.name) = args[i];
|
||||
}
|
||||
return t;
|
||||
}
|
||||
|
||||
pub fn d2index(d1len: usize, d1: usize, d2: usize) usize {
|
||||
return (d1len * d2) + d1;
|
||||
}
|
||||
|
||||
pub fn ensureFieldSubset(comptime L: type, comptime R: type) void {
|
||||
for (std.meta.fields(L)) |item| {
|
||||
if (!@hasField(R, item.name)) @compileError(std.fmt.comptimePrint("{s} is missing the {s} field from {s}", .{ R, item.name, L }));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn fmtReplacer(bytes: string, from: u8, to: u8) std.fmt.Formatter(formatReplacer) {
|
||||
return .{ .data = .{ .bytes = bytes, .from = from, .to = to } };
|
||||
}
|
||||
|
||||
const ReplacerData = struct { bytes: string, from: u8, to: u8 };
|
||||
fn formatReplacer(self: ReplacerData, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
|
||||
_ = fmt;
|
||||
_ = options;
|
||||
for (self.bytes) |c| {
|
||||
try writer.writeByte(if (c == self.from) self.to else @intCast(u8, c));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn randomBytes(comptime len: usize) [len]u8 {
|
||||
var bytes: [len]u8 = undefined;
|
||||
std.crypto.random.bytes(&bytes);
|
||||
return bytes;
|
||||
}
|
||||
|
||||
pub fn writeEnumBig(writer: anytype, comptime E: type, value: E) !void {
|
||||
try writer.writeIntBig(@typeInfo(E).Enum.tag_type, @enumToInt(value));
|
||||
}
|
||||
|
||||
pub fn readEnumBig(reader: anytype, comptime E: type) !E {
|
||||
return @intToEnum(E, try reader.readIntBig(@typeInfo(E).Enum.tag_type));
|
||||
}
|
||||
|
||||
pub fn readExpected(reader: anytype, expected: []const u8) !bool {
|
||||
for (expected) |item| {
|
||||
const actual = try reader.readByte();
|
||||
if (actual != item) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn readBytes(reader: anytype, comptime len: usize) ![len]u8 {
|
||||
var bytes: [len]u8 = undefined;
|
||||
assert(try reader.readAll(&bytes) == len);
|
||||
return bytes;
|
||||
}
|
||||
|
||||
pub fn FixedMaxBuffer(comptime max_len: usize) type {
|
||||
return struct {
|
||||
buf: [max_len]u8,
|
||||
len: usize,
|
||||
pos: usize,
|
||||
|
||||
const Self = @This();
|
||||
pub const Reader = std.io.Reader(*Self, error{}, read);
|
||||
|
||||
pub fn init(r: anytype, runtime_len: usize) !Self {
|
||||
var fmr = Self{
|
||||
.buf = undefined,
|
||||
.len = runtime_len,
|
||||
.pos = 0,
|
||||
};
|
||||
_ = try r.readAll(fmr.buf[0..runtime_len]);
|
||||
return fmr;
|
||||
}
|
||||
|
||||
pub fn reader(self: *Self) Reader {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
fn read(self: *Self, dest: []u8) error{}!usize {
|
||||
const buf = self.buf[0..self.len];
|
||||
const size = std.math.min(dest.len, buf.len - self.pos);
|
||||
const end = self.pos + size;
|
||||
std.mem.copy(u8, dest[0..size], buf[self.pos..end]);
|
||||
self.pos = end;
|
||||
return size;
|
||||
}
|
||||
|
||||
pub fn readLen(self: *Self, len: usize) []const u8 {
|
||||
assert(self.pos + len <= self.len);
|
||||
defer self.pos += len;
|
||||
return self.buf[self.pos..][0..len];
|
||||
}
|
||||
|
||||
pub fn atEnd(self: *const Self) bool {
|
||||
return self.pos == self.len;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn hashBytes(comptime Algo: type, bytes: []const u8) [Algo.digest_length]u8 {
|
||||
var h = Algo.init(.{});
|
||||
var out: [Algo.digest_length]u8 = undefined;
|
||||
h.update(bytes);
|
||||
h.final(&out);
|
||||
return out;
|
||||
}
|
||||
|
||||
pub fn readType(reader: anytype, comptime T: type, endian: std.builtin.Endian) !T {
|
||||
return switch (@typeInfo(T)) {
|
||||
.Struct => |t| {
|
||||
switch (t.layout) {
|
||||
.Auto, .Extern => {
|
||||
var s: T = undefined;
|
||||
inline for (std.meta.fields(T)) |field| {
|
||||
@field(s, field.name) = try readType(reader, field.type, endian);
|
||||
}
|
||||
return s;
|
||||
},
|
||||
.Packed => return @bitCast(T, try readType(reader, t.backing_integer.?, endian)),
|
||||
}
|
||||
},
|
||||
.Array => |t| {
|
||||
var s: T = undefined;
|
||||
for (0..t.len) |i| {
|
||||
s[i] = try readType(reader, t.child, endian);
|
||||
}
|
||||
return s;
|
||||
},
|
||||
.Int => try reader.readInt(T, endian),
|
||||
.Enum => |t| @intToEnum(T, try readType(reader, t.tag_type, endian)),
|
||||
else => |e| @compileError(@tagName(e)),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn indexBufferT(bytes: [*]const u8, comptime T: type, endian: std.builtin.Endian, idx: usize, max_len: usize) T {
|
||||
std.debug.assert(idx < max_len);
|
||||
var fbs = std.io.fixedBufferStream((bytes + (idx * @sizeOf(T)))[0..@sizeOf(T)]);
|
||||
return readType(fbs.reader(), T, endian) catch |err| switch (err) {
|
||||
error.EndOfStream => unreachable, // assert above has been violated
|
||||
};
|
||||
}
|
||||
|
||||
pub fn BufIndexer(comptime T: type, comptime endian: std.builtin.Endian) type {
|
||||
return struct {
|
||||
bytes: [*]const u8,
|
||||
max_len: usize,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn init(bytes: [*]const u8, max_len: usize) Self {
|
||||
return .{
|
||||
.bytes = bytes,
|
||||
.max_len = max_len,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn at(self: *const Self, idx: usize) T {
|
||||
return indexBufferT(self.bytes, T, endian, idx, self.max_len);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn skipToBoundary(pos: u64, boundary: u64, reader: anytype) !void {
|
||||
// const gdiff = counter.bytes_read % 4;
|
||||
// for (range(if (gdiff > 0) 4 - gdiff else 0)) |_| {
|
||||
const a = pos;
|
||||
const b = boundary;
|
||||
try reader.skipBytes(((a + (b - 1)) & ~(b - 1)) - a, .{});
|
||||
}
|
||||
|
||||
/// ?A == A fails
|
||||
/// ?A == @as(?A, b) works
|
||||
pub fn is(a: anytype, b: @TypeOf(a)) bool {
|
||||
return a == b;
|
||||
}
|
||||
|
||||
/// Allows u32 + i16 to work
|
||||
pub fn safeAdd(a: anytype, b: anytype) @TypeOf(a) {
|
||||
if (b >= 0) {
|
||||
return a + @intCast(@TypeOf(a), b);
|
||||
}
|
||||
return a - @intCast(@TypeOf(a), -b);
|
||||
}
|
||||
|
||||
pub fn readBytesAlloc(reader: anytype, alloc: std.mem.Allocator, len: usize) ![]u8 {
|
||||
var list = std.ArrayListUnmanaged(u8){};
|
||||
try list.ensureTotalCapacityPrecise(alloc, len);
|
||||
errdefer list.deinit(alloc);
|
||||
list.appendNTimesAssumeCapacity(0, len);
|
||||
try reader.readNoEof(list.items[0..len]);
|
||||
return list.items;
|
||||
}
|
||||
|
||||
pub fn readFile(dir: std.fs.Dir, sub_path: string, alloc: std.mem.Allocator) !string {
|
||||
_ = dir;
|
||||
_ = sub_path;
|
||||
_ = alloc;
|
||||
@compileError("use std.fs.Dir.readFileAlloc instead");
|
||||
}
|
||||
|
||||
pub fn nullifyS(s: ?string) ?string {
|
||||
if (s == null) return null;
|
||||
if (s.?.len == 0) return null;
|
||||
return s.?;
|
||||
}
|
||||
|
||||
pub fn sliceTo(comptime T: type, haystack: []const T, needle: T) []const T {
|
||||
if (std.mem.indexOfScalar(T, haystack, needle)) |index| {
|
||||
return haystack[0..index];
|
||||
}
|
||||
return haystack;
|
||||
}
|
||||
|
||||
pub fn matchesAll(comptime T: type, haystack: []const u8, comptime needle: fn (T) bool) bool {
|
||||
for (haystack) |c| {
|
||||
if (!needle(c)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn matchesAny(comptime T: type, haystack: []const u8, comptime needle: fn (T) bool) bool {
|
||||
for (haystack) |c| {
|
||||
if (needle(c)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
pub fn opslice(slice: anytype, index: usize) ?std.meta.Child(@TypeOf(slice)) {
|
||||
if (slice.len <= index) return null;
|
||||
return slice[index];
|
||||
}
|
||||
|
||||
pub fn assertLog(ok: bool, comptime message: string, args: anytype) void {
|
||||
if (!ok) std.log.err("assertion failure: " ++ message, args);
|
||||
if (!ok) unreachable; // assertion failure
|
||||
}
|
||||
|
||||
pub fn parse_json(alloc: std.mem.Allocator, input: string) !std.json.ValueTree {
|
||||
var p = std.json.Parser.init(alloc, .alloc_always);
|
||||
return try p.parse(input);
|
||||
}
|
||||
|
||||
pub fn isArrayOf(comptime T: type) std.meta.trait.TraitFn {
|
||||
const Closure = struct {
|
||||
pub fn trait(comptime C: type) bool {
|
||||
return switch (@typeInfo(C)) {
|
||||
.Array => |ti| ti.child == T,
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
};
|
||||
return Closure.trait;
|
||||
}
|
||||
|
||||
pub fn parse_int(comptime T: type, s: ?string, b: u8, d: T) T {
|
||||
if (s == null) return d;
|
||||
return std.fmt.parseInt(T, s.?, b) catch d;
|
||||
}
|
||||
|
||||
pub fn parse_bool(s: ?string) bool {
|
||||
return parse_int(u1, s, 10, 0) > 0;
|
||||
}
|
||||
|
||||
pub fn to_hex(array: anytype) [array.len * 2]u8 {
|
||||
var res: [array.len * 2]u8 = undefined;
|
||||
var fbs = std.io.fixedBufferStream(&res);
|
||||
std.fmt.format(fbs.writer(), "{x}", .{std.fmt.fmtSliceHexLower(&array)}) catch unreachable;
|
||||
return res;
|
||||
}
|
||||
|
||||
pub fn FieldUnion(comptime T: type) type {
|
||||
const infos = std.meta.fields(T);
|
||||
|
||||
var fields: [infos.len]std.builtin.Type.UnionField = undefined;
|
||||
inline for (infos, 0..) |field, i| {
|
||||
fields[i] = .{
|
||||
.name = field.name,
|
||||
.type = field.type,
|
||||
.alignment = field.alignment,
|
||||
};
|
||||
}
|
||||
return @Type(std.builtin.Type{ .Union = .{
|
||||
.layout = .Auto,
|
||||
.tag_type = std.meta.FieldEnum(T),
|
||||
.fields = &fields,
|
||||
.decls = &.{},
|
||||
} });
|
||||
}
|
||||
|
||||
pub fn LoggingReader(comptime T: type, comptime scope: @Type(.EnumLiteral)) type {
|
||||
return struct {
|
||||
child_stream: T,
|
||||
|
||||
pub const Error = T.Error;
|
||||
pub const Reader = std.io.Reader(Self, Error, read);
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn init(child_stream: T) Self {
|
||||
return .{
|
||||
.child_stream = child_stream,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn reader(self: Self) Reader {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
fn read(self: Self, dest: []u8) Error!usize {
|
||||
const n = try self.child_stream.read(dest);
|
||||
std.log.scoped(scope).debug("{s}", .{dest[0..n]});
|
||||
return n;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn LoggingWriter(comptime T: type, comptime scope: @Type(.EnumLiteral)) type {
|
||||
return struct {
|
||||
child_stream: T,
|
||||
|
||||
pub const Error = T.Error;
|
||||
pub const Writer = std.io.Writer(Self, Error, write);
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn init(child_stream: T) Self {
|
||||
return .{
|
||||
.child_stream = child_stream,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn writer(self: Self) Writer {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
fn write(self: Self, bytes: []const u8) Error!usize {
|
||||
std.log.scoped(scope).debug("{s}", .{bytes});
|
||||
return self.child_stream.write(bytes);
|
||||
}
|
||||
};
|
||||
}
|
41
src/.deps/http.zig/build.zig
Normal file
41
src/.deps/http.zig/build.zig
Normal file
|
@ -0,0 +1,41 @@
|
|||
const std = @import("std");
|
||||
|
||||
pub fn build(b: *std.Build) !void {
|
||||
const target = b.standardTargetOptions(.{});
|
||||
const optimize = b.standardOptimizeOption(.{});
|
||||
|
||||
const httpz_module = b.addModule("httpz", .{
|
||||
.source_file = .{ .path = "src/httpz.zig" },
|
||||
});
|
||||
|
||||
// setup executable
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "http.zig demo",
|
||||
.root_source_file = .{ .path = "example/main.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
exe.addModule("httpz", httpz_module);
|
||||
b.installArtifact(exe);
|
||||
|
||||
const run_cmd = b.addRunArtifact(exe);
|
||||
run_cmd.step.dependOn(b.getInstallStep());
|
||||
if (b.args) |args| {
|
||||
run_cmd.addArgs(args);
|
||||
}
|
||||
|
||||
// setup tests
|
||||
const run_step = b.step("run", "Run the app");
|
||||
run_step.dependOn(&run_cmd.step);
|
||||
|
||||
const lib_test = b.addTest(.{
|
||||
.root_source_file = .{ .path = "src/httpz.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
const run_test = b.addRunArtifact(lib_test);
|
||||
run_test.has_side_effects = true;
|
||||
|
||||
const test_step = b.step("test", "Run tests");
|
||||
test_step.dependOn(&run_test.step);
|
||||
}
|
57
src/.deps/http.zig/example/dispatcher.zig
Normal file
57
src/.deps/http.zig/example/dispatcher.zig
Normal file
|
@ -0,0 +1,57 @@
|
|||
const std = @import("std");
|
||||
const httpz = @import("httpz");
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
// Our global state (just like global.zig)
|
||||
const GlobalContext = struct {
|
||||
hits: usize = 0,
|
||||
l: std.Thread.Mutex = .{},
|
||||
};
|
||||
|
||||
// our per-request data
|
||||
const RequestContext = struct {
|
||||
user_id: ?[]const u8,
|
||||
global: *GlobalContext,
|
||||
};
|
||||
|
||||
pub fn start(allocator: Allocator) !void{
|
||||
var ctx = GlobalContext{};
|
||||
var server = try httpz.ServerCtx(*GlobalContext, *RequestContext).init(allocator, .{.pool_size = 10, .port = 5884}, &ctx);
|
||||
server.dispatcher(dispatcher);
|
||||
var router = server.router();
|
||||
router.get("/increment", increment);
|
||||
return server.listen();
|
||||
}
|
||||
|
||||
fn increment(_: *httpz.Request, res: *httpz.Response, ctx: *RequestContext) !void {
|
||||
// we don't actually do anything with ctx.user_id
|
||||
// except make sure it's been set. This could be common in a route
|
||||
// where any user can take an action as long as they're logged in.
|
||||
|
||||
if (ctx.user_id == null) return notAuthorized(res);
|
||||
|
||||
ctx.global.l.lock();
|
||||
var hits = ctx.global.hits + 1;
|
||||
ctx.global.hits = hits;
|
||||
ctx.global.l.unlock();
|
||||
|
||||
res.content_type = httpz.ContentType.TEXT;
|
||||
var out = try std.fmt.allocPrint(res.arena, "{d} hits", .{hits});
|
||||
res.body = out;
|
||||
}
|
||||
|
||||
fn notAuthorized(res: *httpz.Response) void {
|
||||
res.status = 401;
|
||||
res.body = "Not authorized";
|
||||
}
|
||||
|
||||
fn dispatcher(action: httpz.Action(*RequestContext), req: *httpz.Request, res: *httpz.Response, global: *GlobalContext) !void {
|
||||
// If we you need to allocate memory here, consider using req.arena
|
||||
|
||||
// this is obviously a dummy example where we just trust the "user" header
|
||||
var ctx = RequestContext{
|
||||
.global = global,
|
||||
.user_id = req.header("user"),
|
||||
};
|
||||
return action(req, res, &ctx);
|
||||
}
|
28
src/.deps/http.zig/example/global.zig
Normal file
28
src/.deps/http.zig/example/global.zig
Normal file
|
@ -0,0 +1,28 @@
|
|||
const std = @import("std");
|
||||
const httpz = @import("httpz");
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
// Our global state
|
||||
const GlobalContext = struct {
|
||||
hits: usize = 0,
|
||||
l: std.Thread.Mutex = .{},
|
||||
};
|
||||
|
||||
pub fn start(allocator: Allocator) !void{
|
||||
var ctx = GlobalContext{};
|
||||
var server = try httpz.ServerCtx(*GlobalContext, *GlobalContext).init(allocator, .{.pool_size = 10, .port = 5883}, &ctx);
|
||||
var router = server.router();
|
||||
router.get("/increment", increment);
|
||||
return server.listen();
|
||||
}
|
||||
|
||||
fn increment(_: *httpz.Request, res: *httpz.Response, ctx: *GlobalContext) !void {
|
||||
ctx.l.lock();
|
||||
var hits = ctx.hits + 1;
|
||||
ctx.hits = hits;
|
||||
ctx.l.unlock();
|
||||
|
||||
res.content_type = httpz.ContentType.TEXT;
|
||||
var out = try std.fmt.allocPrint(res.arena, "{d} hits", .{hits});
|
||||
res.body = out;
|
||||
}
|
34
src/.deps/http.zig/example/main.zig
Normal file
34
src/.deps/http.zig/example/main.zig
Normal file
|
@ -0,0 +1,34 @@
|
|||
const std = @import("std");
|
||||
|
||||
const simple = @import("simple.zig");
|
||||
const global = @import("global.zig");
|
||||
const dispatcher = @import("dispatcher.zig");
|
||||
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
pub fn main() !void {
|
||||
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
|
||||
const allocator = gpa.allocator();
|
||||
|
||||
// This example starts 3 separate servers, each listening on a different
|
||||
// port.
|
||||
|
||||
// This first server, listening on 127.0.0.1:5882, is a simple server. It
|
||||
// best showcases the *http.Request and *http.Response APIs
|
||||
const t1 = try std.Thread.spawn(.{}, simple.start, .{allocator});
|
||||
|
||||
|
||||
// This second server, listening on 127.0.0.1:5883, has a global shared context.
|
||||
// It showcases how global data can be access from HTTP actions;
|
||||
const t2 = try std.Thread.spawn(.{}, global.start, .{allocator});
|
||||
|
||||
|
||||
// This third server, listening on 127.0.0.1:5884, has a global shared context
|
||||
// with a per-request context tied together using a custom dispatcher.
|
||||
const t3 = try std.Thread.spawn(.{}, dispatcher.start, .{allocator});
|
||||
|
||||
std.log.info("Three demo servers have been started. Please load http://127.0.0.1:5882", .{});
|
||||
|
||||
|
||||
t1.join(); t2.join(); t3.join();
|
||||
}
|
73
src/.deps/http.zig/example/simple.zig
Normal file
73
src/.deps/http.zig/example/simple.zig
Normal file
|
@ -0,0 +1,73 @@
|
|||
const std = @import("std");
const httpz = @import("httpz");
const Allocator = std.mem.Allocator;

/// Starts the "simple" demo server on the default 127.0.0.1:5882.
/// Blocks in listen(); run on its own thread (see main.zig).
pub fn start(allocator: Allocator) !void {
    var server = try httpz.Server().init(allocator, .{.pool_size = 20});
    var router = server.router();

    server.notFound(notFound);

    router.get("/", index);
    router.get("/hello", hello);
    router.get("/json/hello/:name", json);
    router.get("/writer/hello/:name", writer);
    router.get("/chunked_response", chunked);
    try server.listen();
}

/// Landing page listing the demo endpoints.
fn index(_: *httpz.Request, res: *httpz.Response) !void {
    // BUG FIX: the /writer and /json labels were swapped relative to their
    // handlers — /json uses res.json (object serialization) while /writer
    // uses std.json.writeStream (the json writer).
    res.body =
        \\<!DOCTYPE html>
        \\ <ul>
        \\ <li><a href="/hello?name=Teg">Querystring + text output</a>
        \\ <li><a href="/writer/hello/Ghanima">Path parameter + json writer</a>
        \\ <li><a href="/json/hello/Duncan">Path parameter + serialize json object</a>
        \\ <li><a href="/chunked_response">Chunked response</a>
        \\ <li><a href="http://localhost:5883/increment">Global shared state</a>
    ;
}

/// Greets by the `name` querystring value, defaulting to "stranger".
fn hello(req: *httpz.Request, res: *httpz.Response) !void {
    const query = try req.query();
    const name = query.get("name") orelse "stranger";

    // One solution is to use res.arena:
    // var out = try std.fmt.allocPrint(res.arena, "Hello {s}", .{name});
    // res.body = out
    // Another is res.writer(), which can be more efficient in some cases:
    try std.fmt.format(res.writer(), "Hello {s}", .{name});
}

/// Serializes an anonymous struct to JSON via res.json.
fn json(req: *httpz.Request, res: *httpz.Response) !void {
    const name = req.param("name").?;
    try res.json(.{.hello = name}, .{});
}

/// Builds JSON incrementally with std.json.writeStream.
fn writer(req: *httpz.Request, res: *httpz.Response) !void {
    res.content_type = httpz.ContentType.JSON;

    const name = req.param("name").?;
    var ws = std.json.writeStream(res.writer(), 4);
    try ws.beginObject();
    try ws.objectField("name");
    try ws.emitString(name);
    try ws.endObject();
}

/// Demonstrates a chunked transfer-encoding response.
fn chunked(_: *httpz.Request, res: *httpz.Response) !void {
    // Status and headers (including content type) must be set
    // before the first call to chunk.
    res.status = 200;
    res.header("A", "Header");
    res.content_type = httpz.ContentType.TEXT;

    try res.chunk("This is a chunk");
    try res.chunk("\r\n");
    try res.chunk("And another one");
}

/// Custom 404 handler installed via server.notFound.
fn notFound(_: *httpz.Request, res: *httpz.Response) !void {
    res.status = 404;
    res.body = "Not found";
}
|
10
src/.deps/http.zig/src/config.zig
Normal file
10
src/.deps/http.zig/src/config.zig
Normal file
|
@ -0,0 +1,10 @@
|
|||
const request = @import("request.zig");
const response = @import("response.zig");

/// Server-wide configuration with development-friendly defaults.
pub const Config = struct {
    // Port to listen on.
    port: u16 = 5882,
    // Number of pre-allocated request/response pairs in the pool.
    pool_size: u16 = 100,
    // Address to bind.
    address: []const u8 = "127.0.0.1",
    // Request parsing limits and buffer sizes.
    request: request.Config = request.Config{},
    // Response buffer sizes.
    response: response.Config = response.Config{},
};
|
441
src/.deps/http.zig/src/httpz.zig
Normal file
441
src/.deps/http.zig/src/httpz.zig
Normal file
|
@ -0,0 +1,441 @@
|
|||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
|
||||
const t = @import("t.zig");
|
||||
pub const testing = @import("testing.zig");
|
||||
|
||||
pub const routing = @import("router.zig");
|
||||
pub const request = @import("request.zig");
|
||||
pub const listener = @import("listener.zig");
|
||||
pub const response = @import("response.zig");
|
||||
|
||||
pub const Router = routing.Router;
|
||||
pub const Request = request.Request;
|
||||
pub const Response = response.Response;
|
||||
pub const Url = @import("url.zig").Url;
|
||||
pub const Config = @import("config.zig").Config;
|
||||
const Stream = if (builtin.is_test) *t.Stream else std.net.Stream;
|
||||
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
/// HTTP protocol versions the server understands.
pub const Protocol = enum {
    HTTP10,
    HTTP11,
};

/// HTTP request methods the router can match on.
pub const Method = enum {
    GET,
    HEAD,
    POST,
    PUT,
    PATCH,
    DELETE,
    OPTIONS,
};

/// Well-known content types for the Content-Type response header.
pub const ContentType = enum {
    BINARY,
    CSS,
    CSV,
    GIF,
    GZ,
    HTML,
    ICO,
    JPG,
    JS,
    JSON,
    PDF,
    PNG,
    SVG,
    TAR,
    TEXT,
    WEBP,
    XML,
};
|
||||
|
||||
/// The function type of a route handler. With no per-request context
/// (G == void) an action is (req, res); otherwise it also receives G.
pub fn Action(comptime G: type) type {
    return if (G == void)
        *const fn(*Request, *Response) anyerror!void
    else
        *const fn(*Request, *Response, G) anyerror!void;
}
|
||||
|
||||
/// The function type of a dispatcher: it receives the route's action
/// (always an Action(R) — see DispatchableAction and ServerCtx.dispatch),
/// the request/response pair, and — when a global context exists — G.
pub fn Dispatcher(comptime G: type, comptime R: type) type {
    // BUG FIX: the R == void, G != void case previously returned
    // *const fn(Action(G), *Request, *Response, G), but the action stored in
    // DispatchableAction(G, R) and passed by ServerCtx.dispatch is always an
    // Action(R), so that branch could never type-check. With the first
    // parameter fixed to Action(R), all four G/R combinations collapse into
    // a single split on whether a global context is forwarded.
    if (G == void) {
        return *const fn(Action(R), *Request, *Response) anyerror!void;
    }
    return *const fn(Action(R), *Request, *Response, G) anyerror!void;
}
|
||||
|
||||
/// A routed entry: the user's action paired with the dispatcher
/// responsible for invoking it.
pub fn DispatchableAction(comptime G: type, comptime R: type) type {
    return struct {
        action: Action(R),
        dispatcher: Dispatcher(G, R),
    };
}
|
||||
|
||||
/// The function type of the unhandled-error callback; receives the error
/// and, when present, the global context. Must not itself fail.
fn ErrorHandlerAction(comptime G: type) type {
    return if (G == void)
        *const fn(*Request, *Response, anyerror) void
    else
        *const fn(*Request, *Response, anyerror, G) void;
}
|
||||
|
||||
// A thin factory so that Server() and ServerCtx(G, R) expose the same
// init-style API; a plain Server is just a ServerCtx with no contexts.
pub fn Server() type {
    return struct {
        pub fn init(allocator: Allocator, config: Config) !ServerCtx(void, void) {
            return try ServerCtx(void, void).init(allocator, config, {});
        }
    };
}
|
||||
|
||||
/// An HTTP server parameterized by a global context type G (shared across
/// all requests) and a per-request context type R. Use Server() when
/// neither is needed.
pub fn ServerCtx(comptime G: type, comptime R: type) type {
    return struct {
        ctx: G,
        config: Config,
        app_allocator: Allocator,
        httpz_allocator: Allocator,
        _router: Router(G, R),
        _errorHandler: ErrorHandlerAction(G),
        _notFoundHandler: Action(G),

        const Self = @This();

        /// Builds a server wired up with default not-found, error and
        /// dispatch handlers (context-aware variants when G != void).
        pub fn init(allocator: Allocator, config: Config, ctx: G) !Self {
            const default_not_found = if (comptime G == void) defaultNotFound else defaultNotFoundWithContext;
            const default_error = if (comptime G == void) defaultErrorHandler else defaultErrorHandlerWithContext;
            const default_dispatch = if (comptime G == void) defaultDispatcher else defaultDispatcherWithContext;

            return .{
                .ctx = ctx,
                .config = config,
                .app_allocator = allocator,
                .httpz_allocator = allocator,
                ._errorHandler = default_error,
                ._notFoundHandler = default_not_found,
                ._router = try Router(G, R).init(allocator, default_dispatch),
            };
        }

        pub fn deinit(self: *Self) void {
            self._router.deinit();
        }

        /// Blocks, accepting and serving connections until the process exits.
        pub fn listen(self: *Self) !void {
            try listener.listen(*ServerCtx(G, R), self.httpz_allocator, self.app_allocator, self, self.config);
        }

        /// Runs listen() on a fresh thread and returns its handle.
        pub fn listenInNewThread(self: *Self) !std.Thread {
            return try std.Thread.spawn(.{}, listen, .{self});
        }

        /// Replaces the default 404 handler.
        pub fn notFound(self: *Self, nfa: Action(G)) void {
            self._notFoundHandler = nfa;
        }

        /// Replaces the default unhandled-error handler.
        pub fn errorHandler(self: *Self, eha: ErrorHandlerAction(G)) void {
            self._errorHandler = eha;
        }

        /// Installs a custom default dispatcher on the router.
        pub fn dispatcher(self: *Self, d: Dispatcher(G, R)) void {
            (&self._router).dispatcher(d);
        }

        pub fn router(self: *Self) *Router(G, R) {
            return &self._router;
        }

        fn defaultNotFoundWithContext(req: *Request, res: *Response, _: G) !void {
            try defaultNotFound(req, res);
        }

        fn defaultNotFound(_: *Request, res: *Response) !void {
            res.status = 404;
            res.body = "Not Found";
        }

        fn defaultErrorHandlerWithContext(req: *Request, res: *Response, err: anyerror, _: G) void {
            defaultErrorHandler(req, res, err);
        }

        fn defaultErrorHandler(req: *Request, res: *Response, err: anyerror) void {
            res.status = 500;
            res.body = "Internal Server Error";
            std.log.warn("httpz: unhandled exception for request: {s}\nErr: {}", .{req.url.raw, err});
        }

        fn defaultDispatcher(action: Action(R), req: *Request, res: *Response) !void {
            try action(req, res);
        }

        fn defaultDispatcherWithContext(action: Action(R), req: *Request, res: *Response, ctx: G) !void {
            // The default can only forward the global context when the action
            // expects exactly that type.
            if (R == G) {
                return action(req, res, ctx);
            }
            // Otherwise the application must install its own dispatcher.
            return error.CannotDispatch;
        }

        /// Routes and executes one request/response exchange. Returns true
        /// when the connection may be kept alive for another request.
        pub fn handle(self: Self, req: *Request, res: *Response) bool {
            const da = self._router.route(req.method, req.url.path, &req.params);
            self.dispatch(da, req, res) catch |err| switch (err) {
                error.BodyTooBig => {
                    res.status = 431;
                    res.body = "Request body is too big";
                    res.write() catch return false;
                },
                else => {
                    if (comptime G == void) {
                        self._errorHandler(req, res, err);
                    } else {
                        self._errorHandler(req, res, err, self.ctx);
                    }
                },
            };
            // NOTE(review): this also runs after the BodyTooBig branch, which
            // already called res.write(); presumably a second write() is
            // harmless — confirm against response.zig.
            res.write() catch return false;
            return req.canKeepAlive();
        }

        inline fn dispatch(self: Self, dispatchable_action: ?DispatchableAction(G, R), req: *Request, res: *Response) !void {
            if (dispatchable_action) |da| {
                if (G == void) {
                    return da.dispatcher(da.action, req, res);
                }
                return da.dispatcher(da.action, req, res, self.ctx);
            }

            // No route matched: fall through to the not-found handler.
            if (G == void) {
                return self._notFoundHandler(req, res);
            }
            return self._notFoundHandler(req, res, self.ctx);
        }
    };
}
|
||||
|
||||
test {
    std.testing.refAllDecls(@This());
}

test "httpz: invalid request (not enough data, assume closed)" {
    var stream = t.Stream.init();
    defer stream.deinit();
    _ = stream.add("GET / HTTP/1.1\r");

    var server = ServerCtx(u32, u32).init(t.allocator, .{}, 1) catch unreachable;
    testRequest(u32, &server, stream);

    // Not even a complete request line: the connection is closed silently.
    try t.expectEqual(true, stream.closed);
    try t.expectEqual(@as(usize, 0), stream.received.items.len);
}

test "httpz: invalid request" {
    var stream = t.Stream.init();
    defer stream.deinit();
    _ = stream.add("TEA / HTTP/1.1\r\n\r\n");

    var server = ServerCtx(u32, u32).init(t.allocator, .{}, 1) catch unreachable;
    testRequest(u32, &server, stream);

    try t.expectString("HTTP/1.1 400\r\nContent-Length: 15\r\n\r\nInvalid Request", stream.received.items);
}

test "httpz: no route" {
    var stream = t.Stream.init();
    defer stream.deinit();
    _ = stream.add("GET / HTTP/1.1\r\n\r\n");

    var server = ServerCtx(u32, u32).init(t.allocator, .{}, 1) catch unreachable;
    testRequest(u32, &server, stream);

    try t.expectString("HTTP/1.1 404\r\nContent-Length: 9\r\n\r\nNot Found", stream.received.items);
}

test "httpz: no route with custom notFound handler" {
    var stream = t.Stream.init();
    defer stream.deinit();
    _ = stream.add("GET / HTTP/1.1\r\n\r\n");

    var server = ServerCtx(u32, u32).init(t.allocator, .{}, 3) catch unreachable;
    server.notFound(testNotFound);
    testRequest(u32, &server, stream);

    try t.expectString("HTTP/1.1 404\r\nCtx: 3\r\nContent-Length: 10\r\n\r\nwhere lah?", stream.received.items);
}

test "httpz: unhandled exception" {
    // Silence the expected warn-level log from the default error handler.
    std.testing.log_level = .err;
    defer std.testing.log_level = .warn;

    var stream = t.Stream.init();
    defer stream.deinit();
    _ = stream.add("GET /fail HTTP/1.1\r\n\r\n");

    var server = ServerCtx(u32, u32).init(t.allocator, .{}, 5) catch unreachable;
    server.router().get("/fail", testFail);
    testRequest(u32, &server, stream);

    try t.expectString("HTTP/1.1 500\r\nContent-Length: 21\r\n\r\nInternal Server Error", stream.received.items);
}

test "httpz: unhandled exception with custom error handler" {
    std.testing.log_level = .err;
    defer std.testing.log_level = .warn;

    var stream = t.Stream.init();
    defer stream.deinit();
    _ = stream.add("GET /fail HTTP/1.1\r\n\r\n");

    var server = ServerCtx(u32, u32).init(t.allocator, .{}, 4) catch unreachable;
    server.errorHandler(testErrorHandler);
    server.router().get("/fail", testFail);
    testRequest(u32, &server, stream);

    try t.expectString("HTTP/1.1 500\r\nCtx: 4\r\nContent-Length: 29\r\n\r\n#/why/arent/tags/hierarchical", stream.received.items);
}

test "httpz: route params" {
    var stream = t.Stream.init();
    defer stream.deinit();
    _ = stream.add("GET /api/v2/users/9001 HTTP/1.1\r\n\r\n");

    var server = ServerCtx(u32, u32).init(t.allocator, .{}, 1) catch unreachable;
    server.router().all("/api/:version/users/:UserId", testParams);
    testRequest(u32, &server, stream);

    try t.expectString("HTTP/1.1 200\r\nContent-Length: 20\r\n\r\nversion=v2,user=9001", stream.received.items);
}

test "httpz: request and response headers" {
    var stream = t.Stream.init();
    defer stream.deinit();
    _ = stream.add("GET /test/headers HTTP/1.1\r\nHeader-Name: Header-Value\r\n\r\n");

    var server = ServerCtx(u32, u32).init(t.allocator, .{}, 88) catch unreachable;
    server.router().get("/test/headers", testHeaders);
    testRequest(u32, &server, stream);

    try t.expectString("HTTP/1.1 200\r\nCtx: 88\r\nEcho: Header-Value\r\nother: test-value\r\nContent-Length: 0\r\n\r\n", stream.received.items);
}

test "httpz: content-length body" {
    var stream = t.Stream.init();
    defer stream.deinit();
    _ = stream.add("GET /test/body/cl HTTP/1.1\r\nHeader-Name: Header-Value\r\nContent-Length: 4\r\n\r\nabcz");

    var server = ServerCtx(u32, u32).init(t.allocator, .{}, 1) catch unreachable;
    server.router().get("/test/body/cl", testCLBody);
    testRequest(u32, &server, stream);

    try t.expectString("HTTP/1.1 200\r\nEcho-Body: abcz\r\nContent-Length: 0\r\n\r\n", stream.received.items);
}

test "httpz: json response" {
    var stream = t.Stream.init();
    defer stream.deinit();
    _ = stream.add("GET /test/json HTTP/1.1\r\nContent-Length: 0\r\n\r\n");

    var server = Server().init(t.allocator, .{}) catch unreachable;
    server.router().get("/test/json", testJsonRes);
    testRequest(void, &server, stream);

    try t.expectString("HTTP/1.1 201\r\nContent-Type: application/json\r\nContent-Length: 26\r\n\r\n{\"over\":9000,\"teg\":\"soup\"}", stream.received.items);
}

test "httpz: query" {
    var stream = t.Stream.init();
    defer stream.deinit();
    // %61 -> 'a', %21 -> '!': exercises percent-decoding.
    _ = stream.add("GET /test/query?fav=keemun%20te%61%21 HTTP/1.1\r\nContent-Length: 0\r\n\r\n");

    var server = Server().init(t.allocator, .{}) catch unreachable;
    server.router().get("/test/query", testReqQuery);
    testRequest(void, &server, stream);

    try t.expectString("HTTP/1.1 200\r\nContent-Length: 11\r\n\r\nkeemun tea!", stream.received.items);
}

test "httpz: custom dispatcher" {
    var stream = t.Stream.init();
    defer stream.deinit();

    var server = Server().init(t.allocator, .{}) catch unreachable;
    var router = server.router();
    router.allC("/test/dispatcher", testDispatcherAction, .{.dispatcher = testDispatcher});

    _ = stream.add("HEAD /test/dispatcher HTTP/1.1\r\n\r\n");
    testRequest(void, &server, stream);
    try t.expectString("HTTP/1.1 200\r\nContent-Length: 17\r\n\r\ndispatcher-action", stream.received.items);
}
|
||||
|
||||
// Drives a single fake connection through `srv` using a small req/res pool,
// then tears the server down.
fn testRequest(comptime G: type, srv: *ServerCtx(G, G), stream: *t.Stream) void {
    var reqResPool = listener.initReqResPool(t.allocator, t.allocator, .{
        .pool_size = 2,
        .request = .{.buffer_size = 4096},
        .response = .{.body_buffer_size = 4096},
    }) catch unreachable;
    defer reqResPool.deinit();
    defer srv.deinit();
    listener.handleConnection(*ServerCtx(G, G), srv, stream, &reqResPool);
}

// Always errors, to exercise the error-handler paths.
fn testFail(_: *Request, _: *Response, _: u32) !void {
    return error.TestUnhandledError;
}

// Echoes the two route parameters back in the body.
fn testParams(req: *Request, res: *Response, _: u32) !void {
    const args = .{req.param("version").?, req.param("UserId").?};
    res.body = try std.fmt.allocPrint(req.arena, "version={s},user={s}", args);
}

// Echoes a request header (lookup is lower-cased) plus fixed headers.
fn testHeaders(req: *Request, res: *Response, ctx: u32) !void {
    addContextHeader(res, ctx);
    res.header("Echo", req.header("header-name").?);
    res.header("other", "test-value");
}

// Echoes the content-length-delimited body as a header.
fn testCLBody(req: *Request, res: *Response, _: u32) !void {
    const body = try req.body();
    res.header("Echo-Body", body.?);
}

fn testJsonRes(_: *Request, res: *Response) !void {
    res.status = 201;
    try res.json(.{.over = 9000, .teg = "soup"}, .{});
}

fn testReqQuery(req: *Request, res: *Response) !void {
    res.status = 200;
    const query = try req.query();
    res.body = query.get("fav").?;
}

fn testNotFound(_: *Request, res: *Response, ctx: u32) !void {
    res.status = 404;
    addContextHeader(res, ctx);
    res.body = "where lah?";
}

fn testErrorHandler(_: *Request, res: *Response, _: anyerror, ctx: u32) void {
    res.status = 500;
    addContextHeader(res, ctx);
    res.body = "#/why/arent/tags/hierarchical";
}

// Writes the numeric test context out as a "Ctx" response header.
fn addContextHeader(res: *Response, ctx: u32) void {
    const value = std.fmt.allocPrint(res.arena, "{d}", .{ctx}) catch unreachable;
    res.header("Ctx", value);
}

fn testDispatcherAction(_: *Request, res: *Response) !void {
    return res.directWriter().writeAll("action");
}

// A dispatcher that writes a prefix before invoking the action.
fn testDispatcher(action: Action(void), req: *Request, res: *Response) !void {
    try res.directWriter().writeAll("dispatcher-");
    return action(req, res);
}
|
91
src/.deps/http.zig/src/key_value.zig
Normal file
91
src/.deps/http.zig/src/key_value.zig
Normal file
|
@ -0,0 +1,91 @@
|
|||
const std = @import("std");
|
||||
const t = @import("t.zig");
|
||||
|
||||
const mem = std.mem;
|
||||
const ascii = std.ascii;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
/// A fixed-capacity list of key/value string pairs with linear lookup.
/// Additions beyond capacity are silently dropped. Neither keys nor values
/// are copied — callers own the backing memory for the pairs' lifetime.
pub const KeyValue = struct {
    len: usize,
    keys: [][]const u8,
    values: [][]const u8,

    const Self = @This();

    pub fn init(allocator: Allocator, max: usize) !Self {
        const keys = try allocator.alloc([]const u8, max);
        // BUG FIX: if the second allocation fails, the first must be freed;
        // previously `keys` leaked on that error path.
        errdefer allocator.free(keys);
        const values = try allocator.alloc([]const u8, max);
        return Self{
            .len = 0,
            .keys = keys,
            .values = values,
        };
    }

    pub fn deinit(self: *Self, allocator: Allocator) void {
        allocator.free(self.keys);
        allocator.free(self.values);
    }

    /// Appends a pair; silently ignored once capacity is reached.
    pub fn add(self: *Self, key: []const u8, value: []const u8) void {
        const len = self.len;
        var keys = self.keys;
        if (len == keys.len) {
            return;
        }

        keys[len] = key;
        self.values[len] = value;
        self.len = len + 1;
    }

    /// Linear scan for an exact (case-sensitive) key match; null if absent.
    pub fn get(self: Self, needle: []const u8) ?[]const u8 {
        const keys = self.keys[0..self.len];
        for (keys, 0..) |key, i| {
            if (mem.eql(u8, key, needle)) {
                return self.values[i];
            }
        }

        return null;
    }

    /// Logically empties the list; backing arrays are reused.
    pub fn reset(self: *Self) void {
        self.len = 0;
    }
};
|
||||
|
||||
test "key_value: get" {
    var allocator = t.allocator;
    var kv = try KeyValue.init(allocator, 2);
    var key = "content-type".*;
    kv.add(&key, "application/json");

    try t.expectEqual(@as(?[]const u8, "application/json"), kv.get("content-type"));

    // reset empties the list but leaves capacity usable.
    kv.reset();
    try t.expectEqual(@as(?[]const u8, null), kv.get("content-type"));
    kv.add(&key, "application/json2");
    try t.expectEqual(@as(?[]const u8, "application/json2"), kv.get("content-type"));

    kv.deinit(t.allocator);
}

test "key_value: ignores beyond max" {
    var kv = try KeyValue.init(t.allocator, 2);
    var n1 = "content-length".*;
    kv.add(&n1, "cl");

    var n2 = "host".*;
    kv.add(&n2, "www");

    // Third add exceeds the capacity of 2 and is silently dropped.
    var n3 = "authorization".*;
    kv.add(&n3, "hack");

    try t.expectEqual(@as(?[]const u8, "cl"), kv.get("content-length"));
    try t.expectEqual(@as(?[]const u8, "www"), kv.get("host"));
    try t.expectEqual(@as(?[]const u8, null), kv.get("authorization"));

    kv.deinit(t.allocator);
}
|
173
src/.deps/http.zig/src/listener.zig
Normal file
173
src/.deps/http.zig/src/listener.zig
Normal file
|
@ -0,0 +1,173 @@
|
|||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
|
||||
const t = @import("t.zig");
|
||||
const httpz = @import("httpz.zig");
|
||||
|
||||
const Pool = @import("pool.zig").Pool;
|
||||
const Config = @import("config.zig").Config;
|
||||
|
||||
const Loop = std.event.Loop;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const Stream = if (builtin.is_test) *t.Stream else std.net.Stream;
|
||||
const Conn = if (builtin.is_test) *t.Stream else std.net.StreamServer.Connection;
|
||||
|
||||
const os = std.os;
|
||||
const net = std.net;
|
||||
|
||||
const ReqResPool = Pool(*RequestResponsePair, RequestResponsePairConfig);
|
||||
|
||||
/// Binds to config.address:config.port and accepts connections forever.
/// Each accepted connection is served on its own detached thread (or on the
/// event loop when built with async I/O).
pub fn listen(comptime S: type, httpz_allocator: Allocator, app_allocator: Allocator, server: S, config: Config) !void {
    var reqResPool = try initReqResPool(httpz_allocator, app_allocator, config);
    var socket = net.StreamServer.init(.{
        .reuse_address = true,
        .kernel_backlog = 1024,
    });
    defer socket.deinit();

    try socket.listen(net.Address.parseIp(config.address, config.port) catch unreachable);

    // TODO: os.TCP.NODELAY should be usable here, but it currently isn't on
    // 0.11-dev, so the NODELAY option number is hardcoded to 1 instead:
    // if (@hasDecl(os.TCP, "NODELAY")) {
    //     try os.setsockopt(socket.sockfd.?, os.IPPROTO.TCP, os.TCP.NODELAY, &std.mem.toBytes(@as(c_int, 1)));
    // }
    try os.setsockopt(socket.sockfd.?, os.IPPROTO.TCP, 1, &std.mem.toBytes(@as(c_int, 1)));

    while (true) {
        if (socket.accept()) |conn| {
            // In tests the Conn type is a fake stream injected elsewhere.
            const c: Conn = if (comptime builtin.is_test) undefined else conn;
            const args = .{S, server, c, &reqResPool};
            if (comptime std.io.is_async) {
                try Loop.instance.?.runDetached(httpz_allocator, handleConnection, args);
            } else {
                const thrd = try std.Thread.spawn(.{}, handleConnection, args);
                thrd.detach();
            }
        } else |err| {
            std.log.err("failed to accept connection {}", .{err});
        }
    }
}
|
||||
|
||||
/// Creates the pool of request/response pairs that listen() hands out,
/// one pair per in-flight connection.
pub fn initReqResPool(httpz_allocator: Allocator, app_allocator: Allocator, config: Config) !ReqResPool {
    return try ReqResPool.init(httpz_allocator, config.pool_size, initReqRes, .{
        .config = config,
        .app_allocator = app_allocator,
        .httpz_allocator = httpz_allocator,
    });
}
|
||||
|
||||
/// Serves every request arriving on one connection, reusing a single pooled
/// request/response pair (and its arena) until the peer disconnects or an
/// unrecoverable parse error occurs.
pub fn handleConnection(comptime S: type, server: S, conn: Conn, reqResPool: *ReqResPool) void {
    const stream = if (comptime builtin.is_test) conn else conn.stream;
    defer stream.close();

    const reqResPair = reqResPool.acquire() catch |err| {
        std.log.err("failed to acquire request and response object from the pool {}", .{err});
        return;
    };
    defer reqResPool.release(reqResPair);

    const req = reqResPair.request;
    const res = reqResPair.response;
    var arena = reqResPair.arena;

    req.stream = stream;
    res.stream = stream;
    // The arena is reset once, when the whole connection is done.
    defer _ = arena.reset(.free_all);

    while (true) {
        req.reset();
        res.reset();
        // TODO: per-request arena resets segfault after a few hits, so the
        // single reset above is used instead:
        // defer _ = arena.reset(.{.retain_with_limit = 8192});

        if (req.parse()) {
            if (!server.handle(req, res)) {
                return;
            }
        } else |err| {
            // After a parse failure it's unclear where one request stops and
            // the next starts, so the connection cannot be kept alive.
            requestParseError(err, res);
            return;
        }
        req.drain() catch return;
    }
}
|
||||
|
||||
// Maps a request-parse failure to a best-effort error response; any write
// failure is ignored since the connection is being dropped anyway.
fn requestParseError(err: anyerror, res: *httpz.Response) void {
    switch (err) {
        error.UnknownMethod,
        error.InvalidRequestTarget,
        error.UnknownProtocol,
        error.UnsupportedProtocol,
        error.InvalidHeaderLine,
        => {
            res.status = 400;
            res.body = "Invalid Request";
            res.write() catch {};
        },
        error.HeaderTooBig => {
            res.status = 431;
            res.body = "Request header is too big";
            res.write() catch {};
        },
        // Anything else (e.g. a closed socket): nothing useful to send.
        else => {},
    }
}
|
||||
|
||||
// A request and its response are pooled together — not because they're
// tightly coupled, but because one pool means half the locking of two, and
// since both need dynamic memory, the pair can share a single arena.
const RequestResponsePair = struct {
    allocator: Allocator,
    request: *httpz.Request,
    response: *httpz.Response,
    arena: *std.heap.ArenaAllocator,

    const Self = @This();

    /// Releases everything initReqRes allocated (deallocation cannot fail).
    pub fn deinit(self: *Self, httpz_allocator: Allocator) void {
        self.request.deinit(httpz_allocator);
        httpz_allocator.destroy(self.request);

        self.response.deinit(httpz_allocator);
        httpz_allocator.destroy(self.response);
        self.arena.deinit();
        httpz_allocator.destroy(self.arena);
    }
};
|
||||
|
||||
// The state handed to initReqRes by the pool when it creates a new pair.
const RequestResponsePairConfig = struct {
    config: Config,
    app_allocator: Allocator,
    httpz_allocator: Allocator,
};
|
||||
|
||||
// Should not be called directly, but initialized through a pool.
// Allocates a request, a response, and the arena they share.
pub fn initReqRes(c: RequestResponsePairConfig) !*RequestResponsePair {
    const httpz_allocator = c.httpz_allocator;

    var arena = try httpz_allocator.create(std.heap.ArenaAllocator);
    // BUG FIX: every earlier allocation leaked when a later step failed;
    // each acquisition now has a matching errdefer.
    errdefer httpz_allocator.destroy(arena);
    arena.* = std.heap.ArenaAllocator.init(c.app_allocator);
    errdefer arena.deinit();
    const app_allocator = arena.allocator();

    var req = try httpz_allocator.create(httpz.Request);
    errdefer httpz_allocator.destroy(req);
    try req.init(httpz_allocator, app_allocator, c.config.request);
    errdefer req.deinit(httpz_allocator);

    var res = try httpz_allocator.create(httpz.Response);
    errdefer httpz_allocator.destroy(res);
    try res.init(httpz_allocator, app_allocator, c.config.response);
    errdefer res.deinit(httpz_allocator);

    var pair = try httpz_allocator.create(RequestResponsePair);
    pair.* = .{
        .arena = arena,
        .request = req,
        .response = res,
        .allocator = app_allocator,
    };

    return pair;
}
|
||||
|
||||
// All of this logic is largely tested in httpz.zig
|
94
src/.deps/http.zig/src/params.zig
Normal file
94
src/.deps/http.zig/src/params.zig
Normal file
|
@ -0,0 +1,94 @@
|
|||
const std = @import("std");
|
||||
const t = @import("t.zig");
|
||||
|
||||
const mem = std.mem;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
// Similar to KeyValue, with two important differences:
// 1 - Names are not normalized (lowercased): they're statically defined in
//     code, so a param registered as "id" will be fetched as "id".
// 2 - The Router knows the values before it knows the names; addValue and
//     addNames reflect that order of operations.
pub const Params = struct {
    len: usize,
    names: [][]const u8,
    values: [][]const u8,

    const Self = @This();

    pub fn init(allocator: Allocator, max: usize) !Self {
        const names = try allocator.alloc([]const u8, max);
        // BUG FIX: free `names` if the second allocation fails; previously
        // it leaked on that error path.
        errdefer allocator.free(names);
        const values = try allocator.alloc([]const u8, max);
        return Self{
            .len = 0,
            .names = names,
            .values = values,
        };
    }

    pub fn deinit(self: *Self, allocator: Allocator) void {
        allocator.free(self.names);
        allocator.free(self.values);
    }

    /// Records the next positional value; silently dropped at capacity.
    pub fn addValue(self: *Self, value: []const u8) void {
        const len = self.len;
        const values = self.values;
        if (len == values.len) {
            return;
        }
        values[len] = value;
        self.len = len + 1;
    }

    // It should be impossible for names.len != self.len here, but names is
    // re-used between requests and we don't want to leak stale entries, so
    // enforcing names.len (statically derived from the route) is safer.
    pub fn addNames(self: *Self, names: [][]const u8) void {
        std.debug.assert(names.len == self.len);
        const n = self.names;
        for (names, 0..) |name, i| {
            n[i] = name;
        }
        self.len = names.len;
    }

    /// Linear scan for an exact name match; null when absent.
    pub fn get(self: *Self, needle: []const u8) ?[]const u8 {
        const names = self.names[0..self.len];
        for (names, 0..) |name, i| {
            if (mem.eql(u8, name, needle)) {
                return self.values[i];
            }
        }

        return null;
    }

    /// Logically empties the collection; backing arrays are reused.
    pub fn reset(self: *Self) void {
        self.len = 0;
    }
};
|
||||
|
||||
test "params: get" {
    var allocator = t.allocator;
    var params = try Params.init(allocator, 10);
    var names = [_][]const u8{"over", "duncan"};
    // Router order: values first, then the names for the matched route.
    params.addValue("9000");
    params.addValue("idaho");
    params.addNames(names[0..]);

    try t.expectEqual(@as(?[]const u8, "9000"), params.get("over"));
    try t.expectEqual(@as(?[]const u8, "idaho"), params.get("duncan"));

    params.reset();
    try t.expectEqual(@as(?[]const u8, null), params.get("over"));
    try t.expectEqual(@as(?[]const u8, null), params.get("duncan"));
    params.addValue("!9000!");
    params.addNames(names[0..1]);
    try t.expectEqual(@as(?[]const u8, "!9000!"), params.get("over"));

    params.deinit(t.allocator);
}
|
155
src/.deps/http.zig/src/pool.zig
Normal file
155
src/.deps/http.zig/src/pool.zig
Normal file
|
@ -0,0 +1,155 @@
|
|||
const std = @import("std");
|
||||
const t = @import("t.zig");
|
||||
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
/// A thread-safe, fixed-size object pool. Elements are created by `initFn`
/// from the shared state S. When the pool is empty, acquire() creates a
/// fresh element; when the pool is full, release() destroys the element —
/// so at most `size` elements are ever retained.
pub fn Pool(comptime E: type, comptime S: type) type {
    const initFnPtr = *const fn (S) anyerror!E;

    return struct {
        items: []E,
        available: usize,
        allocator: Allocator,
        initFn: initFnPtr,
        initState: S,
        mutex: std.Thread.Mutex,

        const Self = @This();

        pub fn init(allocator: Allocator, size: usize, initFn: initFnPtr, initState: S) !Self {
            const items = try allocator.alloc(E, size);
            // BUG FIX: if initFn fails partway through, the slice and every
            // element created so far previously leaked.
            errdefer allocator.free(items);
            var initialized: usize = 0;
            errdefer for (items[0..initialized]) |e| {
                e.deinit(allocator);
                allocator.destroy(e);
            };

            for (0..size) |i| {
                items[i] = try initFn(initState);
                initialized += 1;
            }

            return Self{
                .items = items,
                .initFn = initFn,
                .initState = initState,
                .available = size,
                .allocator = allocator,
                .mutex = .{},
            };
        }

        pub fn deinit(self: *Self) void {
            const allocator = self.allocator;
            for (self.items) |e| {
                e.deinit(allocator);
                allocator.destroy(e);
            }
            allocator.free(self.items);
        }

        /// Takes an element from the pool, or creates a new one when empty.
        pub fn acquire(self: *Self) !E {
            const items = self.items;
            self.mutex.lock();
            const available = self.available;
            if (available == 0) {
                // Don't hold the lock while running the (possibly slow) initFn.
                self.mutex.unlock();
                return try self.initFn(self.initState);
            }
            defer self.mutex.unlock();
            const new_available = available - 1;
            self.available = new_available;
            return items[new_available];
        }

        /// Returns an element to the pool, or destroys it when the pool is
        /// already full (i.e. the element was an overflow from acquire()).
        pub fn release(self: *Self, e: E) void {
            const items = self.items;

            self.mutex.lock();
            const available = self.available;

            if (available == items.len) {
                self.mutex.unlock();
                const allocator = self.allocator;
                e.deinit(allocator);
                allocator.destroy(e);
                return;
            }

            defer self.mutex.unlock();
            items[available] = e;
            self.available = available + 1;
        }
    };
}
|
||||
|
||||
// Global counter used to give each TestEntry a deterministic id; tests
// reset it to 0 before creating a pool.
var id: i32 = 0;

// Test double pooled by the tests below.
const TestEntry = struct {
	id: i32,
	acquired: bool,
	deinited: bool,

	pub fn init(incr: i32) !*TestEntry {
		id += incr;
		var entry = try t.allocator.create(TestEntry);
		entry.id = id;
		entry.acquired = false;
		// Bug fix: `deinited` was previously left uninitialized, so the
		// tests' `expectEqual(false, e.deinited)` assertions read undefined
		// memory (0xAA in debug builds, arbitrary otherwise).
		entry.deinited = false;
		return entry;
	}

	pub fn deinit(self: *TestEntry, _: Allocator) void {
		self.deinited = true;
	}
};
|
||||
|
||||
// Pool of 2 items with incr=5: the factory runs twice at init (ids 5, 10).
// acquire() pops from the end, so e1 gets id 10, e2 gets id 5, and e3 is
// an overflow item created on demand (id 15).
test "pool: acquires & release" {
	id = 0;
	var p = try Pool(*TestEntry, i32).init(t.allocator, 2, TestEntry.init, 5);
	defer p.deinit();

	var e1 = try p.acquire();
	try t.expectEqual(@as(i32, 10), e1.id);
	try t.expectEqual(false, e1.deinited);

	var e2 = try p.acquire();
	try t.expectEqual(@as(i32, 5), e2.id);
	try t.expectEqual(false, e2.deinited);

	var e3 = try p.acquire();
	try t.expectEqual(@as(i32, 15), e3.id);
	try t.expectEqual(false, e3.deinited);

	// released first, so back in the pool
	p.release(e3);
	try t.expectEqual(@as(i32, 15), e3.id);
	try t.expectEqual(false, e3.deinited);

	p.release(e2);
	try t.expectEqual(@as(i32, 5), e2.id);
	try t.expectEqual(false, e2.deinited);

	p.release(e1);
	// TODO: how to test that e1 was properly released?
}
|
||||
|
||||
// Hammers one pool from five threads; testPool asserts (via the
// `acquired` flag) that no item is ever handed to two threads at once.
test "pool: threadsafety" {
	id = 0;
	var p = try Pool(*TestEntry, i32).init(t.allocator, 4, TestEntry.init, 1);
	defer p.deinit();

	const t1 = try std.Thread.spawn(.{}, testPool, .{&p});
	const t2 = try std.Thread.spawn(.{}, testPool, .{&p});
	const t3 = try std.Thread.spawn(.{}, testPool, .{&p});
	const t4 = try std.Thread.spawn(.{}, testPool, .{&p});
	const t5 = try std.Thread.spawn(.{}, testPool, .{&p});

	t1.join(); t2.join(); t3.join(); t4.join(); t5.join();
}
|
||||
|
||||
// Worker body for the threadsafety test: repeatedly acquires an item,
// marks it as held for a random short interval, then releases it.
// The assert fires if the pool ever double-hands-out an item.
fn testPool(p: *Pool(*TestEntry, i32)) void {
	var r = t.getRandom();
	const random = r.random();

	for (0..5000) |_| {
		var e = p.acquire() catch unreachable;
		std.debug.assert(e.acquired == false);
		e.acquired = true;
		// sleep up to 100µs to encourage interleaving
		std.time.sleep(random.uintAtMost(u32, 100000));
		e.acquired = false;
		p.release(e);
	}
}
|
1235
src/.deps/http.zig/src/request.zig
Normal file
1235
src/.deps/http.zig/src/request.zig
Normal file
File diff suppressed because it is too large
Load Diff
626
src/.deps/http.zig/src/response.zig
Normal file
626
src/.deps/http.zig/src/response.zig
Normal file
|
@ -0,0 +1,626 @@
|
|||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
|
||||
const t = @import("t.zig");
|
||||
const httpz = @import("httpz.zig");
|
||||
const KeyValue = @import("key_value.zig").KeyValue;
|
||||
|
||||
const mem = std.mem;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const Stream = if (builtin.is_test) *t.Stream else std.net.Stream;
|
||||
|
||||
/// Sizing configuration for a Response. The buffers are allocated once
/// (in Response.init) and reused across requests.
pub const Config = struct {
	// max number of headers add()/header() can hold
	max_header_count: usize = 16,
	// static buffer used by the body writer before falling back to the arena
	body_buffer_size: usize = 32_768,
	// scratch buffer for serializing the status line and headers
	header_buffer_size: usize = 4096,
};
|
||||
|
||||
pub const Response = struct {
|
||||
// The stream to write the response to
|
||||
stream: Stream,
|
||||
|
||||
// Where in body we're writing to. Used for dynamically writes to body, e.g.
|
||||
// via the json() or writer() functions
|
||||
pos: usize,
|
||||
|
||||
// An explicit body to send
|
||||
body: ?[]const u8,
|
||||
|
||||
// The status code to write.
|
||||
status: u16,
|
||||
|
||||
// The response headers.
|
||||
// Using res.header(NAME, VALUE) is preferred.
|
||||
headers: KeyValue,
|
||||
|
||||
// The content type. Use header("content-type", value) for a content type
|
||||
// which isn't available in the httpz.ContentType enum.
|
||||
content_type: ?httpz.ContentType,
|
||||
|
||||
// A buffer that exists for the entire lifetime of the response. As we piece
|
||||
// our header together (e.g. looping through the headers to create NAME: value\r\n)
|
||||
// we buffer it in here to limit the # of calls we make to stream.write
|
||||
header_buffer: []u8,
|
||||
|
||||
// When possible (when it fits), we'll buffer the body into this static buffer,
|
||||
// which exists for the entire lifetime of the response. If the response doesn't
|
||||
// fit, we'll allocate the necessary space using the arena allocator.
|
||||
body_buffer: []u8,
|
||||
|
||||
// This is either a referene to body_buffer, or a dynamically allocated
|
||||
// buffer (in our arena). Used by our writer.
|
||||
writer_buffer: []u8,
|
||||
|
||||
// An arena that will be reset at the end of each request. Can be used
|
||||
// internally by this framework. The application is also free to make use of
|
||||
// this arena. This is the same arena as request.arena.
|
||||
arena: Allocator,
|
||||
|
||||
// whether or not we're sending a chunked response
|
||||
chunked: bool,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
// Should not be called directly, but initialized through a pool.
// `allocator` owns the long-lived buffers (released in deinit);
// `arena` is the per-request allocator, reset between requests.
pub fn init(self: *Self, allocator: Allocator, arena: Allocator, config: Config) !void {
	self.arena = arena;
	self.headers = try KeyValue.init(allocator, config.max_header_count);
	// Bug fix: without these errdefers, a failure in a later allocation
	// leaked everything allocated above, since deinit is never called
	// for a Response whose init failed.
	errdefer self.headers.deinit(allocator);
	self.body_buffer = try allocator.alloc(u8, config.body_buffer_size);
	errdefer allocator.free(self.body_buffer);
	self.header_buffer = try allocator.alloc(u8, config.header_buffer_size);
	self.reset();
}
|
||||
|
||||
// Releases the long-lived buffers. Must be passed the same allocator
// that was given to init. Arena-allocated data is not touched here; the
// arena owner frees that.
pub fn deinit(self: *Self, allocator: Allocator) void {
	self.headers.deinit(allocator);
	allocator.free(self.body_buffer);
	allocator.free(self.header_buffer);
}
|
||||
|
||||
// Returns the response to its pristine state so the object can be
// reused for the next request. Buffers are kept; only bookkeeping and
// the writer_buffer alias are reset.
pub fn reset(self: *Self) void {
	self.pos = 0;
	self.body = null;
	self.status = 200;
	self.chunked = false;
	self.content_type = null;
	// drop any arena-grown buffer and point the writer back at the static one
	self.writer_buffer = self.body_buffer;
	self.headers.reset();
}
|
||||
|
||||
// Serializes `value` as JSON into the response body via the growing
// writer, and marks the response as application/json.
// Note: content_type is only set after stringify succeeds; on error the
// error propagates and the body may be partially written.
pub fn json(self: *Self, value: anytype, options: std.json.StringifyOptions) !void {
	try std.json.stringify(value, options, Writer.init(self));
	self.content_type = httpz.ContentType.JSON;
}
|
||||
|
||||
// Registers a response header. `name` and `value` are not copied, so
// they must outlive the response write. No duplicate checking is done.
pub fn header(self: *Self, name: []const u8, value: []const u8) void {
	self.headers.add(name, value);
}
|
||||
|
||||
// Flushes the response to the stream. For a chunked response the headers
// and all chunks were already sent by chunk(); only the terminating
// zero-length chunk remains. Otherwise the headers are written followed
// by the body (sliced to `pos` when the body was built via the writer).
pub fn write(self: *Self) !void {
	const stream = self.stream;
	if (self.chunked) {
		// every chunk write includes the trailing \r\n for the
		// previous chunk.
		try stream.writeAll("\r\n0\r\n\r\n");
		return;
	}

	try self.writeHeaders(stream);
	if (self.body) |body| {
		const pos = self.pos;
		if (pos == 0) {
			// explicit body set by the handler; send it whole
			try stream.writeAll(body);
		} else {
			// body produced through the writer; only pos bytes are valid
			try stream.writeAll(body[0..pos]);
		}
	}
}
|
||||
|
||||
// Serializes the status line, optional content-type, custom headers and
// body framing (Content-Length or Transfer-Encoding) into header_buffer,
// flushing the buffer to the stream whenever the next piece would not fit.
fn writeHeaders(self: *Self, stream: Stream) !void {
	var header_buffer = self.header_buffer;

	// Status line: "HTTP/1.1 XXX\r\n".
	// The previous implementation special-cased every well-known status
	// code with its own `@memcpy(header_buffer[0..14], "HTTP/1.1 NNN\r\n")`
	// branch; all ~60 branches differed only in the constant and emitted
	// exactly the bytes the generic path below emits (e.g. writeInt(200)
	// writes "200"), so the duplicated switch was collapsed.
	@memcpy(header_buffer[0..9], "HTTP/1.1 ");
	// "HTTP/1.1 ".len == 9
	var header_pos: usize = 9 + writeInt(header_buffer[9..], @as(u32, self.status));
	header_buffer[header_pos] = '\r';
	header_buffer[header_pos+1] = '\n';
	header_pos += 2;

	if (self.content_type) |ct| {
		const content_type = switch (ct) {
			.BINARY => "Content-Type: application/octet-stream\r\n",
			.CSS => "Content-Type: text/css\r\n",
			.CSV => "Content-Type: text/csv\r\n",
			.GIF => "Content-Type: image/gif\r\n",
			.GZ => "Content-Type: application/gzip\r\n",
			.HTML => "Content-Type: text/html\r\n",
			.ICO => "Content-Type: image/vnd.microsoft.icon\r\n",
			.JPG => "Content-Type: image/jpeg\r\n",
			.JS => "Content-Type: application/javascript\r\n",
			.JSON => "Content-Type: application/json\r\n",
			.PDF => "Content-Type: application/pdf\r\n",
			.PNG => "Content-Type: image/png\r\n",
			.SVG => "Content-Type: image/svg+xml\r\n",
			.TAR => "Content-Type: application/x-tar\r\n",
			.TEXT => "Content-Type: text/plain\r\n",
			.WEBP => "Content-Type: image/webp\r\n",
			.XML => "Content-Type: application/xml\r\n",
		};
		// NOTE(review): assumes header_buffer can hold the status line plus
		// the content-type line — confirm the minimum configured
		// header_buffer_size guarantees this.
		const end_pos = header_pos + content_type.len;
		@memcpy(header_buffer[header_pos..end_pos], content_type);
		header_pos = end_pos;
	}

	{
		const headers = &self.headers;
		const header_count = headers.len;
		const names = headers.keys[0..header_count];
		const values = headers.values[0..header_count];
		for (names, values) |name, value| {
			// 4 for the colon + space between the name and value
			// and the trailing \r\n
			const header_line_length = name.len + value.len + 4;
			if (header_buffer.len < header_pos + header_line_length) {
				// flush what we have so the next line starts at the front
				// NOTE(review): a single header line longer than the whole
				// buffer would still overflow after this flush — confirm
				// config/usage bounds header sizes.
				try stream.writeAll(header_buffer[0..header_pos]);
				header_pos = 0;
			}
			var end_pos = header_pos + name.len;
			@memcpy(header_buffer[header_pos..end_pos], name);
			header_pos = end_pos;
			header_buffer[header_pos] = ':';
			header_buffer[header_pos+1] = ' ';
			header_pos += 2;

			end_pos = header_pos + value.len;
			@memcpy(header_buffer[header_pos..end_pos], value);
			header_pos = end_pos;
			header_buffer[header_pos] = '\r';
			header_buffer[header_pos+1] = '\n';
			header_pos += 2;
		}
	}

	if (self.body) |body| {
		std.debug.assert(self.chunked == false);
		// 32 bytes comfortably covers "Content-Length: " + digits + \r\n\r\n
		if (header_buffer.len < header_pos + 32) {
			try stream.writeAll(header_buffer[0..header_pos]);
			header_pos = 0;
		}
		const end_pos = header_pos + 16;
		@memcpy(header_buffer[header_pos..end_pos], "Content-Length: ");
		header_pos = end_pos;
		const pos = self.pos;
		// pos > 0 means the body was built via the writer and is sliced
		const len = if (pos > 0) pos else body.len;
		header_pos += writeInt(header_buffer[header_pos..], @intCast(u32, len));
		header_buffer[header_pos] = '\r';
		header_buffer[header_pos+1] = '\n';
		header_buffer[header_pos+2] = '\r';
		header_buffer[header_pos+3] = '\n';
		try stream.writeAll(header_buffer[0..(header_pos+4)]);
	} else {
		// for chunked encoding, we only terminate with a single \r\n
		// since the chunking prepends \r\n to each chunk
		const fin = if (self.chunked) "Transfer-Encoding: chunked\r\n" else "Content-Length: 0\r\n\r\n";
		const final_pos = header_pos + fin.len;
		if (header_pos == 0) {
			try stream.writeAll(fin);
		} else if (header_buffer.len < final_pos) {
			try stream.writeAll(header_buffer[0..header_pos]);
			try stream.writeAll(fin);
		} else {
			@memcpy(header_buffer[header_pos..(header_pos+fin.len)], fin);
			try stream.writeAll(header_buffer[0..final_pos]);
		}
	}
}
|
||||
|
||||
// Sends one chunk of a chunked response. The first call flips `chunked`
// and writes the headers (including "Transfer-Encoding: chunked"). Every
// call first emits the \r\n terminating the *previous* chunk (or the
// header block), then the hex size line, then the data. res.write()
// later emits the final zero-length chunk.
pub fn chunk(self: *Self, data: []const u8) !void {
	const stream = self.stream;
	if (!self.chunked) {
		self.chunked = true;
		try self.writeHeaders(stream);
	}
	// header_buffer is reused here as scratch for the chunk-size line
	const buf = self.header_buffer;
	buf[0] = '\r';
	buf[1] = '\n';
	const len = 2 + std.fmt.formatIntBuf(buf[2..], data.len, 16, .upper, .{});
	buf[len] = '\r';
	buf[len+1] = '\n';
	try stream.writeAll(buf[0..len+2]);
	try stream.writeAll(data);
}
|
||||
|
||||
// Returns a std.io.Writer-compatible writer that appends to the
// response body, growing into the arena when the static buffer fills.
pub fn writer(self: *Self) Writer.IOWriter {
	return .{.context = Writer.init(self)};
}
|
||||
|
||||
// Returns the raw Writer (no std.io adapter), which additionally
// exposes truncate() for backing out already-written bytes.
pub fn directWriter(self: *Self) Writer {
	return Writer.init(self);
}
|
||||
|
||||
// writer optimized for std.json.stringify, but that can also be used as a
// more generic std.io.Writer.
pub const Writer = struct {
	res: *Response,

	pub const Error = Allocator.Error;
	pub const IOWriter = std.io.Writer(Writer, error{OutOfMemory}, Writer.write);

	fn init(res: *Response) Writer {
		// We point our res.body to our body_buffer
		// When we write out the response, we'll check res.pos to see if this
		// needs to be sliced.
		// Also, if this writer needs to dynamically allocate a buffer,
		// it'll re-assign that to res.body.
		const buffer = res.body_buffer;
		res.body = buffer;
		res.writer_buffer = buffer;
		return Writer{.res = res};
	}

	// Drops up to `n` bytes from the end of what has been written so far
	// (clamped at zero; the buffer contents are not erased).
	pub fn truncate(self: Writer, n: usize) void {
		var pos = self.res.pos;
		const to_truncate = if (pos > n) n else pos;
		self.res.pos = pos - to_truncate;
	}

	// Appends a single byte, growing the buffer if needed.
	pub fn writeByte(self: Writer, b: u8) !void {
		try self.ensureSpace(1);
		const pos = self.res.pos;
		self.res.writer_buffer[pos] = b;
		self.res.pos = pos + 1;
	}

	// Appends `b` repeated `n` times.
	pub fn writeByteNTimes(self: Writer, b: u8, n: usize) !void {
		try self.ensureSpace(n);
		var pos = self.res.pos;
		const buffer = self.res.writer_buffer;
		for (0..n) |offset| {
			buffer[pos+offset] = b;
		}
		self.res.pos = pos + n;
	}

	// Appends all of `data`; never performs a short write.
	pub fn writeAll(self: Writer, data: []const u8) !void {
		try self.ensureSpace(data.len);
		const pos = self.res.pos;
		const end_pos = pos + data.len;
		@memcpy(self.res.writer_buffer[pos..end_pos], data);
		self.res.pos = end_pos;
	}

	// std.io.Writer entry point; delegates to writeAll.
	pub fn write(self: Writer, data: []const u8) Allocator.Error!usize {
		try self.writeAll(data);
		return data.len;
	}

	// Ensures the writer buffer can take `n` more bytes, growing via the
	// response arena using ArrayList-style geometric growth.
	fn ensureSpace(self: Writer, n: usize) !void {
		const res = self.res;

		const pos = res.pos;
		const buffer = res.writer_buffer;
		const required_capacity = pos + n;

		if (buffer.len >= required_capacity) {
			// we have enough space in our body as-is
			return;
		}

		// taken from std.ArrayList
		var new_capacity = buffer.len;
		while (true) {
			new_capacity +|= new_capacity / 2 + 8;
			if (new_capacity >= required_capacity) break;
		}

		const arena = res.arena;

		// If this is our static body_buffer, we need to allocate a new dynamic space
		// If it's a dynamic buffer, we'll first try to resize it.
		// You might be thinking that in the 2nd case, we need to free the previous
		// body in the case that resize fails. We don't, because it'll be freed
		// when the arena is freed
		if (buffer.ptr == res.body_buffer.ptr or !arena.resize(buffer, new_capacity)) {
			const new_buffer = try arena.alloc(u8, new_capacity);
			@memcpy(new_buffer[0..buffer.len], buffer);
			res.body = new_buffer;
			res.writer_buffer = new_buffer;
		} else {
			const new_buffer = buffer.ptr[0..new_capacity];
			res.body = new_buffer;
			res.writer_buffer = new_buffer;
		}
	}
};
|
||||
};
|
||||
|
||||
// Writes the decimal representation of `n` into `into` and returns the
// number of bytes written. `into` must be large enough for the result
// (10 bytes covers any u32).
fn writeInt(into: []u8, n: u32) usize {
	if (n == 0) {
		into[0] = '0';
		return 1;
	}

	// emit digits least-significant first, then reverse in place
	var remaining = n;
	var written: usize = 0;
	while (remaining != 0) {
		into[written] = @intCast(u8, remaining % 10) + '0';
		remaining = remaining / 10;
		written += 1;
	}
	std.mem.reverse(u8, into[0..written]);
	return written;
}
|
||||
|
||||
|
||||
// Compares writeInt against std.fmt for every value in 0..100_008.
test "writeInt" {
	var buf: [10]u8 = undefined;
	var tst: [10]u8 = undefined;
	for (0..100_009) |i| {
		const expected_len = std.fmt.formatIntBuf(tst[0..], i, 10, .lower, .{});
		const l = writeInt(&buf, @intCast(u32, i));
		try t.expectString(tst[0..expected_len], buf[0..l]);
	}
}
|
||||
|
||||
// Basic serialization: status-only response, then a response with an
// explicit body (the same Response object reused after reset()).
test "response: write" {
	var s = t.Stream.init();
	var res = testResponse(s, .{});
	defer testCleanup(res, s);

	{
		// no body
		res.status = 401;
		try res.write();
		try t.expectString("HTTP/1.1 401\r\nContent-Length: 0\r\n\r\n", s.received.items);
	}

	{
		// body
		s.reset(); res.reset();
		res.status = 200;
		res.body = "hello";
		try res.write();
		try t.expectString("HTTP/1.1 200\r\nContent-Length: 5\r\n\r\nhello", s.received.items);
	}
}
|
||||
|
||||
// Verifies the ContentType enum is mapped to the right header line.
test "response: content_type" {
	var s = t.Stream.init();
	var res = testResponse(s, .{});
	defer testCleanup(res, s);

	{
		res.content_type = httpz.ContentType.WEBP;
		try res.write();
		try t.expectString("HTTP/1.1 200\r\nContent-Type: image/webp\r\nContent-Length: 0\r\n\r\n", s.received.items);
	}
}
|
||||
|
||||
// Fuzzes writeHeaders' buffer-flushing logic by running the same
// responses with many header_buffer_size values, exercising every
// flush boundary.
test "response: write header_buffer_size" {
	{
		// no header or bodys
		// 19 is the length of our longest header line
		for (19..40) |i| {
			var s = t.Stream.init();
			var res = testResponse(s, .{.header_buffer_size = i});
			defer testCleanup(res, s);

			res.status = 792;
			try res.write();
			try t.expectString("HTTP/1.1 792\r\nContent-Length: 0\r\n\r\n", s.received.items);
		}
	}

	{
		// no body
		// 19 is the length of our longest header line
		for (19..110) |i| {
			var s = t.Stream.init();
			var res = testResponse(s, .{.header_buffer_size = i});
			defer testCleanup(res, s);

			res.status = 401;
			res.header("a-header", "a-value");
			res.header("b-hdr", "b-val");
			res.header("c-header11", "cv");
			try res.write();
			try t.expectString("HTTP/1.1 401\r\na-header: a-value\r\nb-hdr: b-val\r\nc-header11: cv\r\nContent-Length: 0\r\n\r\n", s.received.items);
		}
	}

	{
		// 22 is the length of our longest header line (the content-length)
		for (22..110) |i| {
			var s = t.Stream.init();
			var res = testResponse(s, .{.header_buffer_size = i});
			defer testCleanup(res, s);

			res.status = 8;
			res.header("a-header", "a-value");
			res.header("b-hdr", "b-val");
			res.header("c-header11", "cv");
			res.body = "hello world!";
			try res.write();
			try t.expectString("HTTP/1.1 8\r\na-header: a-value\r\nb-hdr: b-val\r\nc-header11: cv\r\nContent-Length: 12\r\n\r\nhello world!", s.received.items);
		}
	}
}
|
||||
|
||||
// Fuzzes the json() path across body_buffer_size values 0..99 to hit
// the static-buffer, resize and reallocate branches of ensureSpace.
test "response: json fuzz" {
	var r = t.getRandom();
	const random = r.random();

	for (0..1) |_| {
		const body = t.randomString(random, t.allocator, 1000);
		defer t.allocator.free(body);
		const expected_encoded_length = body.len + 2; // wrapped in double quotes

		for (0..100) |i| {
			var s = t.Stream.init();
			var res = testResponse(s, .{.body_buffer_size = i});
			defer testCleanup(res, s);

			res.status = 200;
			try res.json(body, .{});
			try res.write();

			const expected = try std.fmt.allocPrint(t.arena, "HTTP/1.1 200\r\nContent-Type: application/json\r\nContent-Length: {d}\r\n\r\n\"{s}\"", .{expected_encoded_length, body});
			try t.expectString(expected, s.received.items);
		}
	}
}
|
||||
|
||||
// Same growth fuzzing as "json fuzz", but driving the std.io.Writer
// adapter directly through std.json.stringify.
test "response: writer fuzz" {
	var r = t.getRandom();
	const random = r.random();

	for (0..1) |_| {
		const body = t.randomString(random, t.allocator, 1000);
		defer t.allocator.free(body);
		const expected_encoded_length = body.len + 2; // wrapped in double quotes

		for (0..100) |i| {
			var s = t.Stream.init();
			var res = testResponse(s, .{.body_buffer_size = i});
			defer testCleanup(res, s);

			res.status = 204;
			try std.json.stringify(body, .{}, res.writer());
			try res.write();

			const expected = try std.fmt.allocPrint(t.arena, "HTTP/1.1 204\r\nContent-Length: {d}\r\n\r\n\"{s}\"", .{expected_encoded_length, body});
			try t.expectString(expected, s.received.items);
		}
	}
}
|
||||
|
||||
// Exercises truncate() interleaved with writes; the net effect of the
// sequence below is the body "[123,456]".
test "response: direct writer" {
	var s = t.Stream.init();
	var res = testResponse(s, .{});
	defer testCleanup(res, s);

	var writer = res.directWriter();
	// truncate on an empty body is a no-op (clamped to 0)
	writer.truncate(1);
	try writer.writeByte('[');
	writer.truncate(4);
	try writer.writeByte('[');
	try writer.writeAll("12345");
	writer.truncate(2);
	try writer.writeByte(',');
	try writer.writeAll("456");
	try writer.writeByte(',');
	writer.truncate(1);
	try writer.writeByte(']');

	try res.write();
	try t.expectString("HTTP/1.1 200\r\nContent-Length: 9\r\n\r\n[123,456]", s.received.items);
}
|
||||
|
||||
// Chunked encoding: a single chunk, then (after reset) headers plus two
// chunks — note each chunk's size line is hex ("1D" == 29) and write()
// appends the terminating zero-length chunk.
test "response: chunked" {
	var s = t.Stream.init();
	var res = testResponse(s, .{});
	defer testCleanup(res, s);

	{
		// no headers, single chunk
		res.status = 200;
		try res.chunk("Hello");
		try res.write();
		try t.expectString("HTTP/1.1 200\r\nTransfer-Encoding: chunked\r\n\r\n5\r\nHello\r\n0\r\n\r\n", s.received.items);
	}

	{
		// headers, multiple chunk
		s.reset(); res.reset();
		res.status = 1;
		res.content_type = httpz.ContentType.XML;
		res.header("Test", "Chunked");
		try res.chunk("Hello");
		try res.chunk("another slightly bigger chunk");
		try res.write();
		try t.expectString("HTTP/1.1 1\r\nContent-Type: application/xml\r\nTest: Chunked\r\nTransfer-Encoding: chunked\r\n\r\n5\r\nHello\r\n1D\r\nanother slightly bigger chunk\r\n0\r\n\r\n", s.received.items);
	}
}
|
||||
|
||||
// Creates a heap-allocated Response wired to the given test stream.
// init is called with t.allocator standing in for the arena, then the
// arena is swapped to t.arena (which testCleanup resets via t.reset()).
fn testResponse(stream: Stream, config: Config) *Response {
	var res = t.allocator.create(Response) catch unreachable;
	res.init(t.allocator, t.allocator, config) catch unreachable;
	res.arena = t.arena;
	res.stream = stream;
	return res;
}
|
||||
|
||||
// Tears down a Response created by testResponse: frees its buffers,
// resets the shared test arena, destroys the Response allocation and
// finally releases the backing test stream.
fn testCleanup(r: *Response, s: *t.Stream) void {
	r.deinit(t.allocator);
	t.reset();
	t.allocator.destroy(r);
	// the original wrapped this in a redundant `defer` as the last
	// statement; calling it directly is equivalent
	s.deinit();
}
|
620
src/.deps/http.zig/src/router.zig
Normal file
620
src/.deps/http.zig/src/router.zig
Normal file
|
@ -0,0 +1,620 @@
|
|||
const std = @import("std");
|
||||
|
||||
const t = @import("t.zig");
|
||||
const httpz = @import("httpz.zig");
|
||||
const Params = @import("params.zig").Params;
|
||||
const Request = @import("request.zig").Request;
|
||||
const Response = @import("response.zig").Response;
|
||||
|
||||
const Allocator = std.mem.Allocator;
|
||||
const StringHashMap = std.StringHashMap;
|
||||
|
||||
// Per-route configuration. Currently only lets an individual route
// override the router's default dispatcher (null means "use default").
pub fn Config(comptime G: type, comptime R: type) type {
	const Dispatcher = httpz.Dispatcher(G, R);
	return struct {
		dispatcher: ?Dispatcher = null,
	};
}
|
||||
|
||||
pub fn Router(comptime G: type, comptime R: type) type {
|
||||
const Action = httpz.Action(R);
|
||||
const Dispatcher = httpz.Dispatcher(G, R);
|
||||
const DispatchableAction = httpz.DispatchableAction(G, R);
|
||||
|
||||
return struct {
|
||||
_allocator: Allocator,
|
||||
_get: Part(DispatchableAction),
|
||||
_put: Part(DispatchableAction),
|
||||
_post: Part(DispatchableAction),
|
||||
_head: Part(DispatchableAction),
|
||||
_patch: Part(DispatchableAction),
|
||||
_delete: Part(DispatchableAction),
|
||||
_options: Part(DispatchableAction),
|
||||
_default_dispatcher: Dispatcher,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn init(allocator: Allocator, default_dispatcher: Dispatcher) !Self {
|
||||
return Self{
|
||||
._allocator = allocator,
|
||||
._default_dispatcher = default_dispatcher,
|
||||
._get = try Part(DispatchableAction).init(allocator),
|
||||
._head = try Part(DispatchableAction).init(allocator),
|
||||
._post = try Part(DispatchableAction).init(allocator),
|
||||
._put = try Part(DispatchableAction).init(allocator),
|
||||
._patch = try Part(DispatchableAction).init(allocator),
|
||||
._delete = try Part(DispatchableAction).init(allocator),
|
||||
._options = try Part(DispatchableAction).init(allocator),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Self) void {
|
||||
const allocator = self._allocator;
|
||||
self._get.deinit(allocator);
|
||||
self._post.deinit(allocator);
|
||||
self._put.deinit(allocator);
|
||||
self._delete.deinit(allocator);
|
||||
self._patch.deinit(allocator);
|
||||
self._head.deinit(allocator);
|
||||
self._options.deinit(allocator);
|
||||
}
|
||||
|
||||
pub fn dispatcher(self: *Self, d: Dispatcher) void {
|
||||
self._default_dispatcher = d;
|
||||
}
|
||||
|
||||
pub fn route(self: Self, method: httpz.Method, url: []const u8, params: *Params) ?DispatchableAction {
|
||||
return switch (method) {
|
||||
httpz.Method.GET => getRoute(DispatchableAction, self._get, url, params),
|
||||
httpz.Method.POST => getRoute(DispatchableAction, self._post, url, params),
|
||||
httpz.Method.PUT => getRoute(DispatchableAction, self._put, url, params),
|
||||
httpz.Method.DELETE => getRoute(DispatchableAction, self._delete, url, params),
|
||||
httpz.Method.PATCH => getRoute(DispatchableAction, self._patch, url, params),
|
||||
httpz.Method.HEAD => getRoute(DispatchableAction, self._head, url, params),
|
||||
httpz.Method.OPTIONS => getRoute(DispatchableAction, self._options, url, params),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn get(self: *Self, path: []const u8, action: Action) void {
|
||||
self.getC(path, action, .{});
|
||||
}
|
||||
pub fn tryGet(self: *Self, path: []const u8, action: Action) !void {
|
||||
return self.tryGetC(path, action, .{});
|
||||
}
|
||||
pub fn getC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) void {
|
||||
self.tryGetC(path, action, config) catch @panic("failed to create route");
|
||||
}
|
||||
pub fn tryGetC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) !void {
|
||||
const da = DispatchableAction{
|
||||
.action = action,
|
||||
.dispatcher = config.dispatcher orelse self._default_dispatcher,
|
||||
};
|
||||
try addRoute(DispatchableAction, self._allocator, &self._get, path, da);
|
||||
}
|
||||
|
||||
/// Register `action` for PUT requests on `path` using the default config.
/// Panics on failure; see tryPut for the error-returning variant.
pub fn put(self: *Self, path: []const u8, action: Action) void {
    self.putC(path, action, .{});
}

/// Error-returning variant of `put`.
pub fn tryPut(self: *Self, path: []const u8, action: Action) !void {
    return self.tryPutC(path, action, .{});
}

/// Register a PUT route with an explicit per-route config; panics on failure.
pub fn putC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) void {
    self.tryPutC(path, action, config) catch @panic("failed to create route");
}

/// Register a PUT route, returning an error on allocation failure.
pub fn tryPutC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) !void {
    try addRoute(DispatchableAction, self._allocator, &self._put, path, .{
        .action = action,
        // per-route dispatcher overrides the router-wide default
        .dispatcher = config.dispatcher orelse self._default_dispatcher,
    });
}
|
||||
|
||||
/// Register `action` for POST requests on `path` using the default config.
/// Panics on failure; see tryPost for the error-returning variant.
pub fn post(self: *Self, path: []const u8, action: Action) void {
    self.postC(path, action, .{});
}

/// Error-returning variant of `post`.
pub fn tryPost(self: *Self, path: []const u8, action: Action) !void {
    return self.tryPostC(path, action, .{});
}

/// Register a POST route with an explicit per-route config; panics on failure.
pub fn postC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) void {
    self.tryPostC(path, action, config) catch @panic("failed to create route");
}

/// Register a POST route, returning an error on allocation failure.
pub fn tryPostC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) !void {
    try addRoute(DispatchableAction, self._allocator, &self._post, path, .{
        .action = action,
        // per-route dispatcher overrides the router-wide default
        .dispatcher = config.dispatcher orelse self._default_dispatcher,
    });
}
|
||||
|
||||
/// Register `action` for HEAD requests on `path` using the default config.
/// Panics on failure; see tryHead for the error-returning variant.
pub fn head(self: *Self, path: []const u8, action: Action) void {
    self.headC(path, action, .{});
}

/// Error-returning variant of `head`.
pub fn tryHead(self: *Self, path: []const u8, action: Action) !void {
    return self.tryHeadC(path, action, .{});
}

/// Register a HEAD route with an explicit per-route config; panics on failure.
pub fn headC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) void {
    self.tryHeadC(path, action, config) catch @panic("failed to create route");
}

/// Register a HEAD route, returning an error on allocation failure.
pub fn tryHeadC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) !void {
    try addRoute(DispatchableAction, self._allocator, &self._head, path, .{
        .action = action,
        // per-route dispatcher overrides the router-wide default
        .dispatcher = config.dispatcher orelse self._default_dispatcher,
    });
}
|
||||
|
||||
/// Register `action` for PATCH requests on `path` using the default config.
/// Panics on failure; see tryPatch for the error-returning variant.
pub fn patch(self: *Self, path: []const u8, action: Action) void {
    self.patchC(path, action, .{});
}

/// Error-returning variant of `patch`.
pub fn tryPatch(self: *Self, path: []const u8, action: Action) !void {
    return self.tryPatchC(path, action, .{});
}

/// Register a PATCH route with an explicit per-route config; panics on failure.
pub fn patchC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) void {
    self.tryPatchC(path, action, config) catch @panic("failed to create route");
}

/// Register a PATCH route, returning an error on allocation failure.
pub fn tryPatchC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) !void {
    try addRoute(DispatchableAction, self._allocator, &self._patch, path, .{
        .action = action,
        // per-route dispatcher overrides the router-wide default
        .dispatcher = config.dispatcher orelse self._default_dispatcher,
    });
}
|
||||
|
||||
/// Register `action` for DELETE requests on `path` using the default config.
/// Panics on failure; see tryDelete for the error-returning variant.
pub fn delete(self: *Self, path: []const u8, action: Action) void {
    self.deleteC(path, action, .{});
}

/// Error-returning variant of `delete`.
pub fn tryDelete(self: *Self, path: []const u8, action: Action) !void {
    return self.tryDeleteC(path, action, .{});
}

/// Register a DELETE route with an explicit per-route config; panics on failure.
pub fn deleteC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) void {
    self.tryDeleteC(path, action, config) catch @panic("failed to create route");
}

/// Register a DELETE route, returning an error on allocation failure.
pub fn tryDeleteC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) !void {
    try addRoute(DispatchableAction, self._allocator, &self._delete, path, .{
        .action = action,
        // per-route dispatcher overrides the router-wide default
        .dispatcher = config.dispatcher orelse self._default_dispatcher,
    });
}
|
||||
|
||||
/// Register `action` for OPTIONS requests on `path` using the default config.
/// Panics on failure; see tryOption for the error-returning variant.
pub fn option(self: *Self, path: []const u8, action: Action) void {
    self.optionC(path, action, .{});
}

/// Error-returning variant of `option`.
pub fn tryOption(self: *Self, path: []const u8, action: Action) !void {
    return self.tryOptionC(path, action, .{});
}

/// Register an OPTIONS route with an explicit per-route config; panics on failure.
pub fn optionC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) void {
    self.tryOptionC(path, action, config) catch @panic("failed to create route");
}

/// Register an OPTIONS route, returning an error on allocation failure.
pub fn tryOptionC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) !void {
    try addRoute(DispatchableAction, self._allocator, &self._options, path, .{
        .action = action,
        // per-route dispatcher overrides the router-wide default
        .dispatcher = config.dispatcher orelse self._default_dispatcher,
    });
}
|
||||
|
||||
/// Register `action` on `path` for every supported HTTP method, using the
/// default config. Panics on failure; see tryAll for the error variant.
pub fn all(self: *Self, path: []const u8, action: Action) void {
    self.allC(path, action, .{});
}

/// Error-returning variant of `all`.
pub fn tryAll(self: *Self, path: []const u8, action: Action) !void {
    return self.tryAllC(path, action, .{});
}

/// Register on all methods with an explicit config; panics on failure.
pub fn allC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) void {
    self.tryAllC(path, action, config) catch @panic("failed to create route");
}

/// Register the same route in every per-method tree. If any registration
/// fails the earlier ones remain registered.
pub fn tryAllC(self: *Self, path: []const u8, action: Action, config: Config(G, R)) !void {
    try self.tryGetC(path, action, config);
    try self.tryPutC(path, action, config);
    try self.tryPostC(path, action, config);
    try self.tryHeadC(path, action, config);
    try self.tryPatchC(path, action, config);
    try self.tryDeleteC(path, action, config);
    try self.tryOptionC(path, action, config);
}
|
||||
};
|
||||
}
|
||||
|
||||
/// A node in the routing trie. Each node may hold an action plus three kinds
/// of children: literal segments (`parts`), a single parameter segment
/// (`param_part`, for ":name") and a glob segment (`glob`, for "*").
pub fn Part(comptime A: type) type {
    return struct{
        // action bound to the path ending exactly at this node, if any
        action: ?A,
        // child reached by a "*" segment; heap-allocated, owned by this node
        glob: ?*Part(A),
        // true when a registered route ended in "*" here (matches any suffix)
        glob_all: bool,
        // child reached by a ":name" segment; heap-allocated, owned by this node
        param_part: ?*Part(A),
        // names of ALL parameters along the route that terminates here
        // (attached in addRoute, consumed in getRoute); owned by this node
        param_names: ?[][]const u8,
        // children keyed by literal segment text
        parts: StringHashMap(Part(A)),

        const Self = @This();

        pub fn init(allocator: Allocator) !Self {
            return Self{
                .glob = null,
                .glob_all = false,
                .action = null,
                .param_part = null,
                .param_names = null,
                .parts = StringHashMap(Part(A)).init(allocator),
            };
        }

        // NOTE(review): despite the name, this acts as an in-place initializer.
        // addRoute calls it on freshly-created (undefined) memory; it does NOT
        // free an existing map or children. Calling it on a live node would leak.
        pub fn clear(self: *Self, allocator: Allocator) void {
            self.glob = null;
            self.glob_all = false;
            self.action = null;
            self.param_part = null;
            self.param_names = null;
            self.parts = StringHashMap(Part(A)).init(allocator);
        }

        /// Recursively free this node's children and owned allocations.
        /// Children are deinit'd before the pointers that own them are destroyed.
        pub fn deinit(self: *Self, allocator: Allocator) void {
            var it = self.parts.valueIterator();
            while (it.next()) |part| {
                part.deinit(allocator);
            }
            self.parts.deinit();

            if (self.param_part) |part| {
                part.deinit(allocator);
                allocator.destroy(part);
            }

            if (self.param_names) |names| {
                allocator.free(names);
            }

            if (self.glob) |glob| {
                glob.deinit(allocator);
                allocator.destroy(glob);
            }
        }
    };
}
|
||||
|
||||
/// Insert `action` into the routing trie rooted at `root` for `url`.
/// Segments starting with ':' are parameters, "*" is a glob, and a trailing
/// "*" marks a glob-all route that matches any deeper path.
fn addRoute(comptime A: type, allocator: Allocator, root: *Part(A), url: []const u8, action: A) !void {
    // "" and "/" bind directly to the root node.
    if (url.len == 0 or (url.len == 1 and url[0] == '/')) {
        root.action = action;
        return;
    }

    // Strip one leading and one trailing slash so splitting yields clean segments.
    var normalized = url;
    if (normalized[0] == '/') {
        normalized = normalized[1..];
    }
    if (normalized[normalized.len - 1] == '/') {
        normalized = normalized[0..normalized.len - 1];
    }

    var param_name_collector = std.ArrayList([]const u8).init(allocator);
    defer param_name_collector.deinit();

    var route_part = root;
    var it = std.mem.split(u8, normalized, "/");
    while (it.next()) |part| {
        // Fix: skip empty segments (e.g. "a//b"). The original indexed
        // part[0] unconditionally, which panics on an empty slice.
        if (part.len == 0) {
            continue;
        }
        if (part[0] == ':') {
            // parameter segment: remember its name, descend into param child
            try param_name_collector.append(part[1..]);
            if (route_part.param_part) |child| {
                route_part = child;
            } else {
                const child = try allocator.create(Part(A));
                child.clear(allocator);
                route_part.param_part = child;
                route_part = child;
            }
        } else if (part.len == 1 and part[0] == '*') {
            // if this route_part didn't already have an action, then this glob also
            // includes it
            if (route_part.action == null) {
                route_part.action = action;
            }

            if (route_part.glob) |child| {
                route_part = child;
            } else {
                const child = try allocator.create(Part(A));
                child.clear(allocator);
                route_part.glob = child;
                route_part = child;
            }
        } else {
            // literal segment
            const gop = try route_part.parts.getOrPut(part);
            route_part = gop.value_ptr;
            if (!gop.found_existing) {
                // fresh node: initialize it in place
                route_part.clear(allocator);
            }
        }
    }

    const param_name_count = param_name_collector.items.len;
    if (param_name_count > 0) {
        const param_names = try allocator.alloc([]const u8, param_name_count);
        for (param_name_collector.items, 0..) |name, i| {
            param_names[i] = name;
        }
        // Fix: re-registering the same parameterized route previously leaked
        // the old name slice; free it before overwriting.
        if (route_part.param_names) |old| {
            allocator.free(old);
        }
        route_part.param_names = param_names;
    }

    // if the route ended with a '*' (importantly, as opposed to a '*/') then
    // this is a "glob all" route. Important: use "url" and not "normalized"
    // since normalized stripped out the trailing / (if any), which matters here.
    route_part.glob_all = url[url.len - 1] == '*';

    route_part.action = action;
}
|
||||
|
||||
/// Walk the routing trie for `url`, collecting captured parameter values into
/// `params`. Per segment, precedence is: literal child, then param child,
/// then glob child. The deepest "glob all" node seen along the way is the
/// fallback when matching dead-ends.
fn getRoute(comptime A: type, root: Part(A), url: []const u8, params: *Params) ?A {
    if (url.len == 0 or (url.len == 1 and url[0] == '/')) {
        return root.action;
    }

    // Strip one leading and one trailing slash, mirroring addRoute.
    var normalized = url;
    if (normalized[0] == '/') {
        normalized = normalized[1..];
    }
    if (normalized[normalized.len - 1] == '/') {
        normalized = normalized[0..normalized.len - 1];
    }

    // root arrives by value; take a pointer to the local copy so the walk can
    // uniformly follow child pointers.
    var r = root;
    var route_part = &r;

    var glob_all: ?*Part(A) = null;
    var it = std.mem.split(u8, normalized, "/");
    while (it.next()) |part| {
        // the most specific "glob_all" route we find, which is the one most deeply
        // nested, is the one we'll use in case there are no other matches.
        if (route_part.glob_all) {
            glob_all = route_part;
        }

        if (route_part.parts.getPtr(part)) |child| {
            // exact literal segment match wins
            route_part = child;
        } else if (route_part.param_part) |child| {
            // ":name" segment: record the value now; names are attached at the
            // end from the terminal node's param_names
            params.addValue(part);
            route_part = child;
        } else if (route_part.glob) |child| {
            route_part = child;
        } else {
            // dead end: discard any half-collected param values, fall back to
            // the deepest glob-all seen (if any)
            params.len = 0;
            if (glob_all) |fallback| {
                return fallback.action;
            }
            return null;
        }
    }

    if (route_part.action) |action| {
        if (route_part.param_names) |names| {
            params.addNames(names);
        } else {
            // matched via globs only: no names, so drop collected values
            params.len = 0;
        }
        return action;
    }

    // consumed the whole url but no action here; use the glob-all fallback
    params.len = 0;
    if (glob_all) |fallback| {
        return fallback.action;
    }
    return null;
}
|
||||
|
||||
// Verifies root ("" / "/") registration per method, non-matches, and the
// all() helper registering across every method.
test "route: root" {
    var params = try Params.init(t.allocator, 5);
    defer params.deinit(t.allocator);

    var router = Router(void, void).init(t.allocator, testDispatcher1) catch unreachable;
    defer router.deinit();
    router.get("/", testRoute1);
    router.put("/", testRoute2);
    router.post("", testRoute3);
    router.all("/all", testRoute4);

    // Fix: was `var urls` but never mutated; const matches the sibling tests.
    const urls = .{"/", "/other", "/all"};
    try t.expectEqual(&testRoute1, router.route(httpz.Method.GET, "", &params).?.action);
    try t.expectEqual(&testRoute2, router.route(httpz.Method.PUT, "", &params).?.action);
    try t.expectEqual(&testRoute3, router.route(httpz.Method.POST, "", &params).?.action);

    try t.expectEqual(&testRoute1, router.route(httpz.Method.GET, urls[0], &params).?.action);
    try t.expectEqual(&testRoute2, router.route(httpz.Method.PUT, urls[0], &params).?.action);
    try t.expectEqual(&testRoute3, router.route(httpz.Method.POST, urls[0], &params).?.action);

    try t.expectEqual(@as(?httpz.DispatchableAction(void, void), null), router.route(httpz.Method.GET, urls[1], &params));
    try t.expectEqual(@as(?httpz.DispatchableAction(void, void), null), router.route(httpz.Method.DELETE, urls[0], &params));

    // test "all" route: every method must resolve to testRoute4
    inline for (@typeInfo(httpz.Method).Enum.fields) |field| {
        const m = @intToEnum(httpz.Method, field.value);
        try t.expectEqual(&testRoute4, router.route(m, urls[2], &params).?.action);
    }
}
|
||||
|
||||
// Verifies static (no param, no glob) routes match regardless of
// leading/trailing slashes, and that other methods / other urls miss.
test "route: static" {
    var params = try Params.init(t.allocator, 5);
    defer params.deinit(t.allocator);

    var router = Router(void, void).init(t.allocator, testDispatcher1) catch unreachable;
    defer router.deinit();
    router.get("hello/world", testRoute1);
    router.get("/over/9000/", testRoute2);

    {
        // all trailing/leading slash combinations
        // Fix: the original only asserted urls[0..2], silently skipping the
        // "/hello/world/" case; iterate all four like the block below.
        const urls = .{"hello/world", "/hello/world", "hello/world/", "/hello/world/"};
        inline for (urls) |url| {
            try t.expectEqual(&testRoute1, router.route(httpz.Method.GET, url, &params).?.action);
        }
    }

    {
        const urls = .{"over/9000", "/over/9000", "over/9000/", "/over/9000/"};
        // all trailing/leading slash combinations
        inline for (urls) |url| {
            try t.expectEqual(&testRoute2, router.route(httpz.Method.GET, url, &params).?.action);

            // a different method must not match
            try t.expectEqual(@as(?httpz.DispatchableAction(void, void), null), router.route(httpz.Method.PUT, url, &params));
        }
    }

    {
        // random not found
        const urls = .{"over/9000!", "over/ 9000"};
        inline for (urls) |url| {
            try t.expectEqual(@as(?httpz.DispatchableAction(void, void), null), router.route(httpz.Method.GET, url, &params));
        }
    }
}
|
||||
|
||||
// Verifies parameter (":name") capture at the root, nested, with static
// suffixes, and with multiple params; also that failed matches reset params.
test "route: params" {
    var params = try Params.init(t.allocator, 5);
    defer params.deinit(t.allocator);

    var router = Router(void, void).init(t.allocator, testDispatcher1) catch unreachable;
    defer router.deinit();
    router.get("/:p1", testRoute1);
    router.get("/users/:p2", testRoute2);
    router.get("/users/:p2/fav", testRoute3);
    router.get("/users/:p2/like", testRoute4);
    router.get("/users/:p2/fav/:p3", testRoute5);
    router.get("/users/:p2/like/:p3", testRoute6);

    {
        // root param
        try t.expectEqual(&testRoute1, router.route(httpz.Method.GET, "info", &params).?.action);
        try t.expectEqual(@as(usize, 1), params.len);
        try t.expectString("info", params.get("p1").?);
    }

    {
        // nested param
        params.reset();
        try t.expectEqual(&testRoute2, router.route(httpz.Method.GET, "/users/33", &params).?.action);
        try t.expectEqual(@as(usize, 1), params.len);
        try t.expectString("33", params.get("p2").?);
    }

    {
        // nested param with static suffix
        params.reset();
        try t.expectEqual(&testRoute3, router.route(httpz.Method.GET, "/users/9/fav", &params).?.action);
        try t.expectEqual(@as(usize, 1), params.len);
        try t.expectString("9", params.get("p2").?);

        params.reset();
        try t.expectEqual(&testRoute4, router.route(httpz.Method.GET, "/users/9/like", &params).?.action);
        try t.expectEqual(@as(usize, 1), params.len);
        try t.expectString("9", params.get("p2").?);
    }

    {
        // nested params (two captures on one route)
        params.reset();
        try t.expectEqual(&testRoute5, router.route(httpz.Method.GET, "/users/u1/fav/blue", &params).?.action);
        try t.expectEqual(@as(usize, 2), params.len);
        try t.expectString("u1", params.get("p2").?);
        try t.expectString("blue", params.get("p3").?);

        params.reset();
        try t.expectEqual(&testRoute6, router.route(httpz.Method.GET, "/users/u3/like/tea", &params).?.action);
        try t.expectEqual(@as(usize, 2), params.len);
        try t.expectString("u3", params.get("p2").?);
        try t.expectString("tea", params.get("p3").?);
    }

    {
        // not_found: a failed match must leave params empty
        params.reset();
        try t.expectEqual(@as(?httpz.DispatchableAction(void, void), null), router.route(httpz.Method.GET, "/users/u1/other", &params));
        try t.expectEqual(@as(usize, 0), params.len);

        try t.expectEqual(@as(?httpz.DispatchableAction(void, void), null), router.route(httpz.Method.GET, "/users/u1/favss/blue", &params));
        try t.expectEqual(@as(usize, 0), params.len);
    }
}
|
||||
|
||||
// Verifies glob ("*") routes: root glob, nested glob, glob with a static
// suffix, and that an exact static route wins over a glob at the same spot.
test "route: glob" {
    var params = try Params.init(t.allocator, 5);
    defer params.deinit(t.allocator);

    var router = Router(void, void).init(t.allocator, testDispatcher1) catch unreachable;
    defer router.deinit();
    router.get("/*", testRoute1);
    router.get("/users/*", testRoute2);
    router.get("/users/*/test", testRoute3);
    router.get("/users/other/test", testRoute4);

    {
        // root glob matches anything at any depth
        const urls = .{"/anything", "/this/could/be/anything", "/"};
        inline for (urls) |url| {
            try t.expectEqual(&testRoute1, router.route(httpz.Method.GET, url, &params).?.action);
            try t.expectEqual(@as(usize, 0), params.len);
        }
    }

    {
        // nested glob
        const urls = .{"/users/", "/users", "/users/hello", "/users/could/be/anything"};
        inline for (urls) |url| {
            try t.expectEqual(&testRoute2, router.route(httpz.Method.GET, url, &params).?.action);
            try t.expectEqual(@as(usize, 0), params.len);
        }
    }

    {
        // nested glob with a static suffix
        const urls = .{"/users/hello/test", "/users/x/test"};
        inline for (urls) |url| {
            try t.expectEqual(&testRoute3, router.route(httpz.Method.GET, url, &params).?.action);
            try t.expectEqual(@as(usize, 0), params.len);
        }
    }

    {
        // exact static match takes precedence over the glob route
        try t.expectEqual(&testRoute4, router.route(httpz.Method.GET, "/users/other/test", &params).?.action);
        try t.expectEqual(@as(usize, 0), params.len);
    }
}
|
||||
|
||||
// TODO: this functionality isn't implemented because I can't think of a way
|
||||
// to do it which isn't relatively expensive (e.g. recursively or keeping a
|
||||
// stack of (a) parts and (b) url segments and trying to rematch every possible
|
||||
// combination of param + part)...and I don't know that this use-case is really
|
||||
// common.
|
||||
|
||||
// test "route: ambiguous params" {
|
||||
// var params = try Params.init(t.allocator, 5);
|
||||
// defer params.deinit();
|
||||
|
||||
// var router = Router(u32).init(t.allocator, 9999999) catch unreachable;
|
||||
// defer router.deinit();
|
||||
// router.get("/:any/users", 1, .{});
|
||||
// router.get("/hello/users/test", 2, .{});
|
||||
|
||||
// {
|
||||
// try t.expectEqual(@as(u32, 1), router.route(httpz.Method.GET, "/x/users", ¶ms));
|
||||
// try t.expectEqual(@as(usize, 1), params.len);
|
||||
// try t.expectString("x", params.get("any"));
|
||||
|
||||
// params.reset();
|
||||
// try t.expectEqual(@as(u32, 2), router.route(httpz.Method.GET, "/hello/users/test", ¶ms));
|
||||
// try t.expectEqual(@as(usize, 0), params.len);
|
||||
|
||||
// params.reset();
|
||||
// try t.expectEqual(@as(u32, 1), router.route(httpz.Method.GET, "/hello/users", ¶ms));
|
||||
// try t.expectEqual(@as(usize, 1), params.len);
|
||||
// try t.expectString("hello", params.get("any"));
|
||||
// }
|
||||
// }
|
||||
|
||||
// No-op dispatcher/action stubs. The router tests above compare routed
// actions by function address, so each stub must be a distinct function.
fn testDispatcher1(_: httpz.Action(void), _: *Request, _: *Response) anyerror!void{}
fn testRoute1(_: *Request, _: *Response) anyerror!void {}
fn testRoute2(_: *Request, _: *Response) anyerror!void {}
fn testRoute3(_: *Request, _: *Response) anyerror!void {}
fn testRoute4(_: *Request, _: *Response) anyerror!void {}
fn testRoute5(_: *Request, _: *Response) anyerror!void {}
fn testRoute6(_: *Request, _: *Response) anyerror!void {}
|
118
src/.deps/http.zig/src/t.zig
Normal file
118
src/.deps/http.zig/src/t.zig
Normal file
|
@ -0,0 +1,118 @@
|
|||
// Internal helpers used by this library
|
||||
// If you're looking for helpers to help you mock/test
|
||||
// httpz.Request and httpz.Response, checkout testing.zig
|
||||
// which is exposed as httpz.testing.
|
||||
const std = @import("std");
|
||||
|
||||
const mem = std.mem;
|
||||
const ArrayList = std.ArrayList;
|
||||
|
||||
pub const expect = std.testing.expect;
|
||||
pub const allocator = std.testing.allocator;
|
||||
|
||||
pub const expectEqual = std.testing.expectEqual;
|
||||
pub const expectError = std.testing.expectError;
|
||||
pub const expectString = std.testing.expectEqualStrings;
|
||||
|
||||
// Shared test arena backed by the testing allocator. Call reset() between
// tests to free everything allocated through `arena`.
pub var aa = std.heap.ArenaAllocator.init(allocator);
pub const arena = aa.allocator();

// Release all arena allocations (keeps the arena usable afterwards).
pub fn reset() void {
    _ = aa.reset(.free_all);
}
|
||||
|
||||
/// Create a PRNG seeded from OS entropy. Seeding is not reproducible, so
/// helpers using this (e.g. Stream.read fragmentation) vary run to run.
pub fn getRandom() std.rand.DefaultPrng {
    var seed: u64 = undefined;
    // test-only helper: seeding failure is treated as unrecoverable
    std.os.getrandom(std.mem.asBytes(&seed)) catch unreachable;
    return std.rand.DefaultPrng.init(seed);
}
|
||||
|
||||
/// In-memory fake of a network stream for tests. Data queued with add() is
/// returned by read() in random-sized fragments (to exercise parsers against
/// partial reads); everything written via writeAll() is captured in `received`.
pub const Stream = struct {
    closed: bool,
    // cursor into to_read.items for the next read()
    read_index: usize,
    to_read: ArrayList(u8),
    random: std.rand.DefaultPrng,
    received: ArrayList(u8),

    const Self = @This();

    /// Heap-allocate a fresh stream; free with deinit().
    pub fn init() *Stream {
        var s = allocator.create(Stream) catch unreachable;
        s.closed = false;
        s.read_index = 0;
        s.random = getRandom();
        s.to_read = ArrayList(u8).init(allocator);
        s.received = ArrayList(u8).init(allocator);
        return s;
    }

    pub fn deinit(self: *Self) void {
        self.to_read.deinit();
        self.received.deinit();
        allocator.destroy(self);
    }

    /// Clear both buffers so the stream can be reused.
    pub fn reset(self: *Self) void {
        // Fix: also rewind the read cursor. The original cleared the buffers
        // but left read_index at its old value, so a reused stream would
        // start reading newly add()'d data at a stale offset.
        self.read_index = 0;
        self.to_read.clearRetainingCapacity();
        self.received.clearRetainingCapacity();
    }

    /// Queue bytes for subsequent read() calls. Chainable.
    pub fn add(self: *Self, value: []const u8) *Self {
        self.to_read.appendSlice(value) catch unreachable;
        return self;
    }

    /// Read a random-sized fragment (at least 1 byte when data is pending)
    /// into buf; returns 0 when no data remains or buf is empty.
    pub fn read(self: *Self, buf: []u8) !usize {
        std.debug.assert(!self.closed);

        const read_index = self.read_index;
        const items = self.to_read.items;

        if (read_index == items.len) {
            return 0;
        }
        if (buf.len == 0) {
            return 0;
        }

        // let's fragment this message: pick 1..max_can_read bytes at random
        const left_to_read = items.len - read_index;
        const max_can_read = if (buf.len < left_to_read) buf.len else left_to_read;

        const random = self.random.random();
        const to_read = random.uintAtMost(usize, max_can_read - 1) + 1;

        var data = items[read_index..(read_index+to_read)];
        if (data.len > buf.len) {
            // we have more data than we have space in buf (our target)
            // we'll give it when it can take
            data = data[0..buf.len];
        }
        self.read_index = read_index + data.len;

        for (data, 0..) |b, i| {
            buf[i] = b;
        }

        return data.len;
    }

    // store messages that are written to the stream
    pub fn writeAll(self: *Self, data: []const u8) !void {
        self.received.appendSlice(data) catch unreachable;
    }

    pub fn close(self: *Self) void {
        self.closed = true;
    }
};
|
||||
|
||||
/// Allocate and return a random string of length 1..=max+1 drawn from a
/// url-ish alphabet. Caller owns the returned slice (free with `a`).
pub fn randomString(random: std.rand.Random, a: std.mem.Allocator, max: usize) []u8 {
    const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-_/";
    const out = a.alloc(u8, random.uintAtMost(usize, max) + 1) catch unreachable;
    for (out) |*ch| {
        ch.* = alphabet[random.uintAtMost(usize, alphabet.len - 1)];
    }
    return out;
}
|
509
src/.deps/http.zig/src/testing.zig
Normal file
509
src/.deps/http.zig/src/testing.zig
Normal file
|
@ -0,0 +1,509 @@
|
|||
// Helpers for application developers to be able to mock
|
||||
// request and parse responses
|
||||
const std = @import("std");
|
||||
const t = @import("t.zig");
|
||||
const httpz = @import("httpz.zig");
|
||||
|
||||
const ArrayList = std.ArrayList;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
/// Build a mock request/response pair for exercising handlers. Per-pair
/// allocations come from a dedicated arena owned by the returned Testing;
/// call Testing.deinit() to release everything.
pub fn init(config: httpz.Config) Testing {
    var arena = t.allocator.create(std.heap.ArenaAllocator) catch unreachable;
    arena.* = std.heap.ArenaAllocator.init(t.allocator);

    var req = t.allocator.create(httpz.Request) catch unreachable;
    req.init(t.allocator, arena.allocator(), config.request) catch unreachable;
    // default url; override via Testing.url()
    req.url = httpz.Url.parse("/");

    var res = t.allocator.create(httpz.Response) catch unreachable;
    res.init(t.allocator, arena.allocator(), config.response) catch unreachable;
    // responses are written into an in-memory fake stream (t.Stream)
    res.stream = t.Stream.init();

    return Testing{
        .req = req,
        .res = res,
        ._arena = arena,
        .arena = arena.allocator(),
    };
}
|
||||
|
||||
/// Wrapper around a mocked request/response pair. Setters (url, param,
/// query, header, body, json) shape the request; expect* helpers parse the
/// written response and assert on it.
pub const Testing = struct {
    // arena backing req/res internals; destroyed in deinit
    _arena: *std.heap.ArenaAllocator,
    req: *httpz.Request,
    res: *httpz.Response,
    arena: std.mem.Allocator,
    // true when json() allocated the request body (deinit must free it)
    free_body: bool = false,
    // lazily-built parse of the written response; cached across expect* calls
    parsed_response: ?Response = null,

    const Self = @This();

    const Response = struct {
        status: u16,
        raw: []const u8,
        body: []const u8,
        // Only populated if getJson() is called. Need to keep it around as we
        // need to free the memory it allocated
        json_value: ?std.json.ValueTree,
        headers: std.StringHashMap([]const u8),
    };

    /// Tear down everything init() and the setters allocated, in dependency
    /// order: request-owned bits, request, stream, response, arena, then the
    /// cached parsed response.
    pub fn deinit(self: *Self) void {
        // the header function lowercased the provided header name (to be
        // consistent with the real request parsing, so we need to free that memory)
        const headers = self.req.headers;
        for (0..headers.len) |i| {
            t.allocator.free(headers.keys[i]);
        }

        if (self.free_body) {
            t.allocator.free(self.req.bd.?);
        }

        self.req.deinit(t.allocator);
        t.allocator.destroy(self.req);

        self.res.stream.deinit();

        self.res.deinit(t.allocator);
        t.allocator.destroy(self.res);

        self._arena.deinit();
        t.allocator.destroy(self._arena);

        if (self.parsed_response) |*pr| {
            pr.headers.deinit();
            t.allocator.free(pr.raw);
            if (pr.json_value) |*jv| {
                jv.deinit();
            }
        }
    }

    /// Set the request url (replaces the default "/").
    pub fn url(self: *Self, u: []const u8) void {
        self.req.url = httpz.Url.parse(u);
    }

    /// Add one captured route parameter to the request.
    pub fn param(self: *Self, name: []const u8, value: []const u8) void {
        // This is ugly, but the Param structure is optimized for how the router
        // works, so we don't have a clean API for setting 1 key=value pair. We'll
        // just dig into the internals instead
        var p = &self.req.params;
        p.names[p.len] = name;
        p.values[p.len] = value;
        p.len += 1;
    }

    /// Add a querystring pair (marks the querystring as already parsed).
    pub fn query(self: *Self, name: []const u8, value: []const u8) void {
        self.req.qs_read = true;
        self.req.qs.add(name, value);
    }

    /// Add a request header. The name is lowercased (allocated copy, freed
    /// in deinit) to match real request parsing.
    pub fn header(self: *Self, name: []const u8, value: []const u8) void {
        const lower = t.allocator.alloc(u8, name.len) catch unreachable;
        _ = std.ascii.lowerString(lower, name);
        self.req.headers.add(lower, value);
    }

    /// Set the raw request body (not copied; caller keeps ownership).
    pub fn body(self: *Self, bd: []const u8) void {
        self.req.bd_read = true;
        self.req.bd = bd;
    }

    /// Serialize `value` to JSON and install it as the request body.
    /// The serialized copy is owned by this Testing (freed in deinit).
    pub fn json(self: *Self, value: anytype) void {
        // replacing a previous json() body: free the old copy first
        if (self.free_body) {
            t.allocator.free(self.req.bd.?);
        }

        var arr = ArrayList(u8).init(t.allocator);
        defer arr.deinit();

        std.json.stringify(value, .{}, arr.writer()) catch unreachable;

        const bd = t.allocator.alloc(u8, arr.items.len) catch unreachable;
        @memcpy(bd, arr.items);
        self.free_body = true;
        self.body(bd);
    }

    /// Assert the response status code.
    pub fn expectStatus(self: Self, expected: u16) !void {
        try t.expectEqual(expected, self.res.status);
    }

    /// Assert the full response body (chunked bodies are decoded first).
    pub fn expectBody(self: *Self, expected: []const u8) !void {
        const pr = try self.parseResponse();
        try t.expectString(expected, pr.body);
    }

    /// Assert the response body is JSON semantically equal to `expected`
    /// (and that Content-Type is application/json). Prints each difference
    /// before failing.
    pub fn expectJson(self: *Self, expected: anytype) !void {
        const pr = try self.parseResponse();

        try self.expectHeader("Content-Type", "application/json");

        var jc = JsonComparer.init(self.arena);
        const diffs = try jc.compare(expected, pr.body);
        if (diffs.items.len == 0) {
            return;
        }

        for (diffs.items, 0..) |diff, i| {
            std.debug.print("\n==Difference #{d}==\n", .{i+1});
            std.debug.print("  {s}: {s}\n  Left: {s}\n  Right: {s}\n", .{ diff.path, diff.err, diff.a, diff.b});
        }
        return error.JsonNotEqual;
    }

    /// Assert a response header's exact value (name is case-sensitive here).
    pub fn expectHeader(self: *Self, name: []const u8, expected: []const u8) !void {
        const pr = try self.parseResponse();
        try t.expectString(expected, pr.headers.get(name).?);
    }

    /// Assert the number of response headers.
    pub fn expectHeaderCount(self: *Self, expected: u32) !void {
        const pr = try self.parseResponse();
        try t.expectEqual(expected, pr.headers.count());
    }

    /// Parse the response body as JSON and return the root value. The parsed
    /// tree is cached on parsed_response so deinit can free it.
    pub fn getJson(self: *Self) !std.json.Value {
        var pr = try self.parseResponse();
        if (pr.json_value) |jv| return jv.root;

        var parser = std.json.Parser.init(t.allocator, .alloc_always);
        defer parser.deinit();
        pr.json_value = (try parser.parse(pr.body));
        // re-store: pr is a by-value copy of the cached struct
        self.parsed_response = pr;
        return pr.json_value.?.root;
    }

    /// Flush the response to the fake stream and parse the raw HTTP/1.1
    /// bytes into status/headers/body. Result is cached; the raw copy is
    /// owned by this Testing and freed in deinit.
    pub fn parseResponse(self: *Self) !Response {
        if (self.parsed_response) |r| return r;
        try self.res.write();

        const data = self.res.stream.received.items;

        // data won't outlive this function, we want our Response to take ownership
        // of the full body, since it needs to reference parts of it.
        const raw = t.allocator.alloc(u8, data.len) catch unreachable;
        @memcpy(raw, data);

        var status: u16 = 0;
        var header_length: usize = 0;
        var headers = std.StringHashMap([]const u8).init(t.allocator);

        var it = std.mem.split(u8, raw, "\r\n");
        if (it.next()) |line| {
            header_length = line.len + 2;
            // status line: status code starts after "HTTP/1.1 " (9 bytes)
            status = try std.fmt.parseInt(u16, line[9..], 10);
        } else {
            return error.InvalidResponseLine;
        }

        while (it.next()) |line| {
            header_length += line.len + 2;
            // blank line terminates the header section
            if (line.len == 0) break;
            if (std.mem.indexOfScalar(u8, line, ':')) |index| {
                // +2 to strip out the leading space
                headers.put(line[0..index], line[index+2..]) catch unreachable;
            } else {
                return error.InvalidHeader;
            }
        }

        var body_length = raw.len - header_length;
        if (headers.get("Transfer-Encoding")) |te| {
            if (std.mem.eql(u8, te, "chunked")) {
                // decode in place over the raw copy; body_length becomes the
                // decoded length
                body_length = decodeChunkedEncoding(raw[header_length..], data[header_length..]);
            }
        }

        const pr = Response{
            .raw = raw,
            .status = status,
            .headers = headers,
            .json_value = null,
            .body = raw[header_length..header_length + body_length],
        };

        self.parsed_response = pr;
        return pr;
    }
};
|
||||
|
||||
/// Decode an HTTP/1.1 chunked transfer-coded body from `full_src` into
/// `full_dest`, returning the number of decoded bytes. Test helper: input is
/// assumed well-formed; malformed input trips an assert.
fn decodeChunkedEncoding(full_dest: []u8, full_src: []u8) usize {
    var src = full_src;
    var dest = full_dest;
    var length: usize = 0;

    while (true) {
        // chunk header is "<hex-size>\r\n"
        const nl = std.mem.indexOfScalar(u8, src, '\r') orelse unreachable;
        const chunk_length = std.fmt.parseInt(u32, src[0..nl], 16) catch unreachable;
        if (chunk_length == 0) {
            // terminal chunk must be followed by "\r\n\r\n". Fix: the original
            // `continue`d here on a mismatch without consuming anything,
            // spinning forever; assert-and-break instead.
            std.debug.assert(std.mem.eql(u8, src[nl .. nl + 4], "\r\n\r\n"));
            break;
        }

        // copy the chunk payload, which sits between its "\r\n" delimiters
        @memcpy(dest[0..chunk_length], src[nl + 2 .. nl + 2 + chunk_length]);
        length += chunk_length;

        dest = dest[chunk_length..];
        // skip: size line (nl) + "\r\n" + payload + trailing "\r\n"
        src = src[nl + 4 + chunk_length ..];
    }
    return length;
}
|
||||
|
||||
// Compares two values by serializing both to JSON and walking the parsed
// trees, collecting a list of differences. All allocations come from the
// provided allocator (presumably an arena owned by the test harness — nothing
// here is individually freed; TODO confirm against the caller).
const JsonComparer = struct {
    allocator: Allocator,

    const Self = @This();

    // One point of divergence between the two JSON documents.
    const Diff = struct {
        // Kind of mismatch, e.g. "not equal", "array length", "field missing".
        err: []const u8,
        // Dotted path to the mismatching node, e.g. "price.amount".
        path: []const u8,
        // String representation of side a at that path.
        a: []const u8,
        // String representation of side b at that path.
        b: []const u8,
    };

    fn init(allocator: Allocator) Self {
        return .{
            .allocator = allocator,
        };
    }

    // We compare by getting the string representation of a and b
    // and then parsing it into a std.json.ValueTree, which we can compare
    // Either a or b might already be serialized JSON string.
    // Returns the (possibly empty) list of diffs; empty list == equal.
    fn compare(self: *Self, a: anytype, b: anytype) !ArrayList(Diff) {
        const allocator = self.allocator;
        var a_bytes: []const u8 = undefined;
        if (@TypeOf(a) != []const u8) {
            // a isn't a string, let's serialize it
            a_bytes = try self.stringify(a);
        } else {
            a_bytes = a;
        }

        var b_bytes: []const u8 = undefined;
        if (@TypeOf(b) != []const u8) {
            // b isn't a string, let's serialize it
            b_bytes = try self.stringify(b);
        } else {
            b_bytes = b;
        }

        var a_parser = std.json.Parser.init(allocator, .alloc_always);
        const a_tree = try a_parser.parse(a_bytes);

        var b_parser = std.json.Parser.init(allocator, .alloc_always);
        const b_tree = try b_parser.parse(b_bytes);

        var diffs = ArrayList(Diff).init(allocator);
        var path = ArrayList([]const u8).init(allocator);
        try self.compareValue(a_tree.root, b_tree.root, &diffs, &path);
        return diffs;
    }

    // Recursively compares two std.json.Value nodes, appending a Diff for
    // every mismatch found. `path` tracks the current location in the
    // document and is restored before returning from each level.
    fn compareValue(self: *Self, a: std.json.Value, b: std.json.Value, diffs: *ArrayList(Diff), path: *ArrayList([]const u8)) !void {
        const allocator = self.allocator;

        // Different JSON types at the same path: record and stop descending.
        if (!std.mem.eql(u8, @tagName(a), @tagName(b))) {
            diffs.append(self.diff("types don't match", path, @tagName(a), @tagName(b))) catch unreachable;
            return;
        }

        switch (a) {
            .null => {},
            .bool => {
                if (a.bool != b.bool) {
                    diffs.append(self.diff("not equal", path, self.format(a.bool), self.format(b.bool))) catch unreachable;
                }
            },
            .integer => {
                if (a.integer != b.integer) {
                    diffs.append(self.diff("not equal", path, self.format(a.integer), self.format(b.integer))) catch unreachable;
                }
            },
            .float => {
                // NOTE(review): exact float comparison — serialization
                // round-trips that change the representation would diff.
                if (a.float != b.float) {
                    diffs.append(self.diff("not equal", path, self.format(a.float), self.format(b.float))) catch unreachable;
                }
            },
            .number_string => {
                if (!std.mem.eql(u8, a.number_string, b.number_string)) {
                    diffs.append(self.diff("not equal", path, a.number_string, b.number_string)) catch unreachable;
                }
            },
            .string => {
                if (!std.mem.eql(u8, a.string, b.string)) {
                    diffs.append(self.diff("not equal", path, a.string, b.string)) catch unreachable;
                }
            },
            .array => {
                const a_len = a.array.items.len;
                const b_len = b.array.items.len;
                // Length mismatch: report once and don't compare elements.
                if (a_len != b_len) {
                    diffs.append(self.diff("array length", path, self.format(a_len), self.format(b_len))) catch unreachable;
                    return;
                }
                for (a.array.items, b.array.items, 0..) |a_item, b_item, i| {
                    // Array elements are addressed by index in the path.
                    try path.append(try std.fmt.allocPrint(allocator, "{d}", .{i}));
                    try self.compareValue(a_item, b_item, diffs, path);
                    _ = path.pop();
                }
            },
            .object => {
                // NOTE(review): only a's keys are iterated — keys present in
                // b but missing in a are not reported.
                var it = a.object.iterator();
                while (it.next()) |entry| {
                    const key = entry.key_ptr.*;
                    try path.append(key);
                    if (b.object.get(key)) |b_item| {
                        try self.compareValue(entry.value_ptr.*, b_item, diffs, path);
                    } else {
                        diffs.append(self.diff("field missing", path, key, "")) catch unreachable;
                    }
                    _ = path.pop();
                }
            },
        }
    }

    // Builds a Diff, joining the current path segments with '.'.
    fn diff(self: *Self, err: []const u8, path: *ArrayList([]const u8), a_rep: []const u8, b_rep: []const u8) Diff {
        const full_path = std.mem.join(self.allocator, ".", path.items) catch unreachable;
        return .{
            .a = a_rep,
            .b = b_rep,
            .err = err,
            .path = full_path,
        };
    }

    // Serializes `value` to JSON; the returned bytes live in self.allocator.
    fn stringify(self: *Self, value: anytype) ![]const u8 {
        var arr = ArrayList(u8).init(self.allocator);
        try std.json.stringify(value, .{}, arr.writer());
        return arr.items;
    }

    // Formats any value with "{}" for diff reporting.
    fn format(self: *Self, value: anytype) []const u8 {
        return std.fmt.allocPrint(self.allocator, "{}", .{value}) catch unreachable;
    }
};
|
||||
|
||||
// Tests for the testing harness itself: request-setup helpers (param/query/
// header/body/json/url) and response-expectation helpers. `init` and `t` are
// declared earlier in this file.

test "testing: params" {
    var ht = init(.{});
    defer ht.deinit();

    ht.param("id", "over9000");
    try t.expectString("over9000", ht.req.params.get("id").?);
    // Unset params resolve to null.
    try t.expectEqual(@as(?[]const u8, null), ht.req.params.get("other"));
}

test "testing: query" {
    var ht = init(.{});
    defer ht.deinit();

    ht.query("search", "tea");
    ht.query("category", "447");

    const query = try ht.req.query();
    try t.expectString("tea", query.get("search").?);
    try t.expectString("447", query.get("category").?);
    try t.expectEqual(@as(?[]const u8, null), query.get("other"));
}

test "testing: empty query" {
    var ht = init(.{});
    defer ht.deinit();

    const query = try ht.req.query();
    try t.expectEqual(@as(usize, 0), query.len);
}

test "testing: query via url" {
    var ht = init(.{});
    defer ht.deinit();
    // Query string can also be supplied through the full URL.
    ht.url("/hello?duncan=idaho");

    const query = try ht.req.query();
    try t.expectString("idaho", query.get("duncan").?);
}

test "testing: header" {
    var ht = init(.{});
    defer ht.deinit();

    ht.header("Search", "tea");
    ht.header("Category", "447");

    // Header lookups here are against lowercased names.
    try t.expectString("tea", ht.req.headers.get("search").?);
    try t.expectString("447", ht.req.headers.get("category").?);
    try t.expectEqual(@as(?[]const u8, null), ht.req.headers.get("other"));
}

test "testing: body" {
    var ht = init(.{});
    defer ht.deinit();

    ht.body("the body");
    try t.expectString("the body", (try ht.req.body()).?);
}

test "testing: json" {
    var ht = init(.{});
    defer ht.deinit();

    // json() serializes the value and installs it as the request body.
    ht.json(.{.over = 9000});
    try t.expectString("{\"over\":9000}", (try ht.req.body()).?);
}

test "testing: expectBody empty" {
    var ht = init(.{});
    defer ht.deinit();
    try ht.expectStatus(200);
    try ht.expectBody("");
    try ht.expectHeaderCount(1);
    try ht.expectHeader("Content-Length", "0");
}

test "testing: expectBody" {
    var ht = init(.{});
    defer ht.deinit();
    ht.res.status = 404;
    ht.res.body = "nope";

    try ht.expectStatus(404);
    try ht.expectBody("nope");
}

test "testing: expectJson" {
    var ht = init(.{});
    defer ht.deinit();
    ht.res.status = 201;
    try ht.res.json(.{.tea = "keemun", .price = .{.amount = 4990, .discount = 0.1}}, .{});

    try ht.expectStatus(201);
    // Field order is irrelevant: comparison is structural, not textual.
    try ht.expectJson(.{ .price = .{.discount = 0.1, .amount = 4990}, .tea = "keemun"});
}

test "testing: getJson" {
    var ht = init(.{});
    defer ht.deinit();

    ht.res.status = 201;
    try ht.res.json(.{.tea = "silver needle"}, .{});

    try ht.expectStatus(201);
    const json = try ht.getJson();
    try t.expectString("silver needle", json.object.get("tea").?.string);
}

test "testing: parseResponse" {
    var ht = init(.{});
    defer ht.deinit();
    ht.res.status = 201;
    try ht.res.json(.{.tea = 33}, .{});

    try ht.expectStatus(201);
    const res = try ht.parseResponse();
    try t.expectEqual(@as(u16, 201), res.status);
    try t.expectEqual(@as(usize, 2), res.headers.count());
}
|
238
src/.deps/http.zig/src/url.zig
Normal file
238
src/.deps/http.zig/src/url.zig
Normal file
|
@ -0,0 +1,238 @@
|
|||
const std = @import("std");
|
||||
const t = @import("t.zig");
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
// Common percent-escape digit pairs ("20", "21", ...) pre-packed as u16s so
// Url.unescape can match two hex digits with a single integer compare.
// NOTE(review): only the uppercase spellings are listed; other escapes
// (e.g. lowercase "%2f") fall through to the generic unHex path.
const ENC_20 = @bitCast(u16, [2]u8{'2', '0'});
const ENC_21 = @bitCast(u16, [2]u8{'2', '1'});
const ENC_22 = @bitCast(u16, [2]u8{'2', '2'});
const ENC_23 = @bitCast(u16, [2]u8{'2', '3'});
const ENC_24 = @bitCast(u16, [2]u8{'2', '4'});
const ENC_25 = @bitCast(u16, [2]u8{'2', '5'});
const ENC_26 = @bitCast(u16, [2]u8{'2', '6'});
const ENC_27 = @bitCast(u16, [2]u8{'2', '7'});
const ENC_28 = @bitCast(u16, [2]u8{'2', '8'});
const ENC_29 = @bitCast(u16, [2]u8{'2', '9'});
const ENC_2A = @bitCast(u16, [2]u8{'2', 'A'});
const ENC_2B = @bitCast(u16, [2]u8{'2', 'B'});
const ENC_2C = @bitCast(u16, [2]u8{'2', 'C'});
const ENC_2F = @bitCast(u16, [2]u8{'2', 'F'});
const ENC_3A = @bitCast(u16, [2]u8{'3', 'A'});
const ENC_3B = @bitCast(u16, [2]u8{'3', 'B'});
const ENC_3D = @bitCast(u16, [2]u8{'3', 'D'});
const ENC_3F = @bitCast(u16, [2]u8{'3', 'F'});
const ENC_40 = @bitCast(u16, [2]u8{'4', '0'});
const ENC_5B = @bitCast(u16, [2]u8{'5', 'B'});
const ENC_5D = @bitCast(u16, [2]u8{'5', 'D'});
||||
|
||||
|
||||
// A minimally parsed URL: the raw text plus path/query slices that alias it.
pub const Url = struct {
    raw: []const u8,
    path: []const u8,
    query: []const u8,

    const Self = @This();

    // Splits `raw` at the first '?': everything before is the path, everything
    // after is the query string (without the '?'). No copying or decoding is
    // done; the returned slices alias `raw`.
    pub fn parse(raw: []const u8) Self {
        var path = raw;
        var query: []const u8 = "";

        if (std.mem.indexOfScalar(u8, raw, '?')) |index| {
            path = raw[0..index];
            query = raw[index+1..];
        }

        return .{
            .raw = raw,
            .path = path,
            .query = query,
        };
    }

    // the special "*" url, which is valid in HTTP OPTIONS request.
    pub fn star() Self {
        return .{
            .raw = "*",
            .path = "*",
            .query = "",
        };
    }

    pub const UnescapeResult = struct {
        // Set to the value, whether or not it required unescaping.
        value: []const u8,

        // true if the value WAS unescaped AND placed in buffer
        buffered: bool,
    };
    // std.Url.unescapeString has 2 problems
    // First, it doesn't convert '+' -> ' '
    // Second, it _always_ allocates a new string even if nothing needs to
    // be unescaped
    // When we _have_ to unescape a key or value, we'll try to store the new
    // value in our static buffer (if we have space), else we'll fallback to
    // allocating memory in the arena.
    // Returns error.InvalidEscapeSequence on a truncated or non-hex %XX.
    pub fn unescape(allocator: Allocator, buffer: []u8, input: []const u8) !UnescapeResult {
        var has_plus = false;
        var unescaped_len = input.len;

        // First pass: validate every escape and compute the decoded length
        // (each valid %XX collapses 3 bytes into 1).
        var in_i: usize = 0;
        while (in_i < input.len) {
            const b = input[in_i];
            if (b == '%') {
                if (in_i + 2 >= input.len or !isHex(input[in_i+1]) or !isHex(input[in_i+2])) {
                    return error.InvalidEscapeSequence;
                }
                in_i += 3;
                unescaped_len -= 2;
            }
            else if (b == '+') {
                has_plus = true;
                in_i += 1;
            } else {
                in_i += 1;
            }
        }

        // no encoding, and no plus? nothing to unescape
        if (unescaped_len == input.len and !has_plus) {
            return .{.value = input, .buffered = false};
        }

        // Prefer the caller's static buffer; fall back to the allocator
        // (typically an arena) when the decoded value doesn't fit.
        var out = buffer;
        var buffered = true;
        if (buffer.len < unescaped_len) {
            out = try allocator.alloc(u8, unescaped_len);
            buffered = false;
        }

        // Second pass: write the decoded bytes. Common uppercase escapes are
        // matched as u16 pairs (the ENC_* constants) before falling back to
        // generic hex decoding.
        in_i = 0;
        for (0..unescaped_len) |i| {
            const b = input[in_i];
            if (b == '%') {
                const enc = input[in_i+1..in_i+3];
                out[i] = switch (@bitCast(u16, enc[0..2].*)) {
                    ENC_20 => ' ',
                    ENC_21 => '!',
                    ENC_22 => '"',
                    ENC_23 => '#',
                    ENC_24 => '$',
                    ENC_25 => '%',
                    ENC_26 => '&',
                    ENC_27 => '\'',
                    ENC_28 => '(',
                    ENC_29 => ')',
                    ENC_2A => '*',
                    ENC_2B => '+',
                    ENC_2C => ',',
                    ENC_2F => '/',
                    ENC_3A => ':',
                    ENC_3B => ';',
                    ENC_3D => '=',
                    ENC_3F => '?',
                    ENC_40 => '@',
                    ENC_5B => '[',
                    ENC_5D => ']',
                    else => unHex(enc[0]) << 4 | unHex(enc[1]),
                };
                in_i += 3;
            } else if (b == '+') {
                out[i] = ' ';
                in_i += 1;
            } else {
                out[i] = b;
                in_i += 1;
            }
        }

        return .{.value = out[0..unescaped_len], .buffered = buffered};
    }
};
|
||||
|
||||
// Returns true if `b` is an ASCII hexadecimal digit (0-9, a-f, A-F).
fn isHex(b: u8) bool {
    return std.ascii.isHex(b);
}
|
||||
|
||||
// Converts a single ASCII hex digit to its numeric value (0-15).
// Callers must have validated `b` with isHex first; any other byte is
// `unreachable` (UB in release-fast builds).
fn unHex(b: u8) u8 {
    if (b >= '0' and b <= '9') return b - '0';
    if (b >= 'a' and b <= 'f') return b - 'a' + 10;
    if (b >= 'A' and b <= 'F') return b - 'A' + 10;
    unreachable; // isHex has already rejected everything else
}
|
||||
|
||||
|
||||
// Exercises Url.parse across root/path/query permutations.
test "url: parse" {
    {
        // absolute root
        const url = Url.parse("/");
        try t.expectString("/", url.raw);
        try t.expectString("/", url.path);
        try t.expectString("", url.query);
    }

    {
        // absolute path
        const url = Url.parse("/a/bc/def");
        try t.expectString("/a/bc/def", url.raw);
        try t.expectString("/a/bc/def", url.path);
        try t.expectString("", url.query);
    }

    {
        // absolute root with query
        const url = Url.parse("/?over=9000");
        try t.expectString("/?over=9000", url.raw);
        try t.expectString("/", url.path);
        try t.expectString("over=9000", url.query);
    }

    {
        // absolute root with empty query
        const url = Url.parse("/?");
        try t.expectString("/?", url.raw);
        try t.expectString("/", url.path);
        try t.expectString("", url.query);
    }

    {
        // absolute path with query
        const url = Url.parse("/hello/teg?duncan=idaho&ghanima=atreides");
        try t.expectString("/hello/teg?duncan=idaho&ghanima=atreides", url.raw);
        try t.expectString("/hello/teg", url.path);
        try t.expectString("duncan=idaho&ghanima=atreides", url.query);
    }
}

// Covers invalid escape errors, '+' handling, buffered vs allocated results,
// and a long multi-byte (UTF-8) escape sequence.
test "url: unescape" {
    var arena = std.heap.ArenaAllocator.init(t.allocator);
    const allocator = arena.allocator();
    defer arena.deinit();

    var buffer: [10]u8 = undefined;

    // Truncated or non-hex escapes must be rejected.
    try t.expectError(error.InvalidEscapeSequence, Url.unescape(t.allocator, &buffer, "%"));
    try t.expectError(error.InvalidEscapeSequence, Url.unescape(t.allocator, &buffer, "%a"));
    try t.expectError(error.InvalidEscapeSequence, Url.unescape(t.allocator, &buffer, "%1"));
    try t.expectError(error.InvalidEscapeSequence, Url.unescape(t.allocator, &buffer, "123%45%6"));
    try t.expectError(error.InvalidEscapeSequence, Url.unescape(t.allocator, &buffer, "%zzzzz"));

    // Short results fit the static buffer -> buffered == true.
    var res = try Url.unescape(allocator, &buffer, "a+b");
    try t.expectString("a b", res.value);
    try t.expectEqual(true, res.buffered);

    res = try Url.unescape(allocator, &buffer, "a%20b");
    try t.expectString("a b", res.value);
    try t.expectEqual(true, res.buffered);

    // Longer than the 10-byte buffer -> allocated, buffered == false.
    const input = "%5C%C3%B6%2F%20%C3%A4%C3%B6%C3%9F%20~~.adas-https%3A%2F%2Fcanvas%3A123%2F%23ads%26%26sad";
    const expected = "\\ö/ äöß ~~.adas-https://canvas:123/#ads&&sad";
    res = try Url.unescape(allocator, &buffer, input);
    try t.expectString(expected, res.value);
    try t.expectEqual(false, res.buffered);
}
|
358
src/.deps/jwt.zig
Normal file
358
src/.deps/jwt.zig
Normal file
|
@ -0,0 +1,358 @@
|
|||
|
||||
const std = @import("std");
|
||||
const testing = std.testing;
|
||||
const ValueTree = std.json.ValueTree;
|
||||
const Value = std.json.Value;
|
||||
const base64url = std.base64.url_safe_no_pad;
|
||||
|
||||
// Supported signing algorithms (HMAC-SHA2 family only; no RSA/ECDSA here).
const Algorithm = enum {
    const Self = @This();

    HS256,
    HS384,
    HS512,

    // Serializes the enum as its tag name (e.g. "HS256") so it can be
    // embedded directly in the JOSE header JSON.
    pub fn jsonStringify(value: Self, options: std.json.StringifyOptions, writer: anytype) @TypeOf(writer).Error!void {
        try std.json.stringify(std.meta.tagName(value), options, writer);
    }

    // Maps the algorithm tag to the std.crypto HMAC type used to sign and
    // verify tokens (resolved at comptime).
    pub fn CryptoFn(comptime self: Self) type {
        return switch (self) {
            .HS256 => std.crypto.auth.hmac.sha2.HmacSha256,
            .HS384 => std.crypto.auth.hmac.sha2.HmacSha384,
            .HS512 => std.crypto.auth.hmac.sha2.HmacSha512,
        };
    }
};
|
||||
|
||||
// The two JWT serialization families: signed (JWS) or encrypted (JWE).
const JWTType = enum { JWS, JWE };
|
||||
|
||||
// Key material used when signing or validating a token.
pub const SignatureOptions = struct {
    // Raw HMAC key bytes (the tests below base64-decode keys before
    // passing them here).
    key: []const u8,
    // Optional "kid" (key id) value to embed in the JOSE header.
    kid: ?[]const u8 = null,
};
|
||||
|
||||
/// Serializes `payload` to JSON and signs it as a JWS compact token.
/// Caller owns the returned slice.
pub fn encode(allocator: std.mem.Allocator, comptime alg: Algorithm, payload: anytype, signatureOptions: SignatureOptions) ![]const u8 {
    var serialized = std.ArrayList(u8).init(allocator);
    defer serialized.deinit();

    try std.json.stringify(payload, .{}, serialized.writer());

    return try encodeMessage(allocator, alg, serialized.items, signatureOptions);
}
|
||||
|
||||
/// Builds a JWS compact token ("<header>.<payload>.<signature>") from an
/// already-serialized message. Caller owns the returned slice.
pub fn encodeMessage(allocator: std.mem.Allocator, comptime alg: Algorithm, message: []const u8, signatureOptions: SignatureOptions) ![]const u8 {
    // Build the protected JOSE header: {"alg": ..., "typ": "JWT"[, "kid": ...]}.
    var protected_header = std.json.ObjectMap.init(allocator);
    defer protected_header.deinit();
    try protected_header.put("alg", .{ .string = @tagName(alg) });
    try protected_header.put("typ", .{ .string = "JWT" });
    if (signatureOptions.kid) |kid| {
        try protected_header.put("kid", .{ .string = kid });
    }

    var protected_header_json = std.ArrayList(u8).init(allocator);
    defer protected_header_json.deinit();

    try std.json.stringify(Value{ .object = protected_header }, .{}, protected_header_json.writer());

    const message_base64_len = base64url.Encoder.calcSize(message.len);
    const protected_header_base64_len = base64url.Encoder.calcSize(protected_header_json.items.len);

    // Reserve room for "<header>.<payload>" and base64url-encode both parts
    // directly into the output buffer.
    var jwt_text = std.ArrayList(u8).init(allocator);
    defer jwt_text.deinit();
    try jwt_text.resize(message_base64_len + 1 + protected_header_base64_len);

    var protected_header_base64 = jwt_text.items[0..protected_header_base64_len];
    var message_base64 = jwt_text.items[protected_header_base64_len + 1 ..][0..message_base64_len];

    _ = base64url.Encoder.encode(protected_header_base64, protected_header_json.items);
    jwt_text.items[protected_header_base64_len] = '.';
    _ = base64url.Encoder.encode(message_base64, message);

    // Sign "<header>.<payload>". This MUST happen before the resize below:
    // resize may reallocate and invalidate the two slices used here.
    const signature = &generate_signature(alg, signatureOptions.key, protected_header_base64, message_base64);
    const signature_base64_len = base64url.Encoder.calcSize(signature.len);

    // Grow for ".<signature>" and re-slice from the (possibly new) buffer.
    try jwt_text.resize(message_base64_len + 1 + protected_header_base64_len + 1 + signature_base64_len);
    var signature_base64 = jwt_text.items[message_base64_len + 1 + protected_header_base64_len + 1 ..][0..signature_base64_len];

    jwt_text.items[message_base64_len + 1 + protected_header_base64_len] = '.';
    _ = base64url.Encoder.encode(signature_base64, signature);

    return jwt_text.toOwnedSlice();
}
|
||||
|
||||
/// Validates `tokenText` as a JWS signed with `alg` and parses the claims
/// into `P`. Release the result with validateFree.
pub fn validate(comptime P: type, allocator: std.mem.Allocator, comptime alg: Algorithm, tokenText: []const u8, signatureOptions: SignatureOptions) !P {
    const message = try validateMessage(allocator, alg, tokenText, signatureOptions);
    defer allocator.free(message);

    // 10. Verify that the resulting octet sequence is a UTF-8-encoded
    // representation of a completely valid JSON object conforming to
    // RFC 7159 [RFC7159]; let the JWT Claims Set be this JSON object.
    // NOTE(review): on newer Zig std, parseFromSlice returns a Parsed(P)
    // wrapper rather than P — confirm against the toolchain this vendored
    // file targets.
    return std.json.parseFromSlice(P, allocator, message, .{});
}
|
||||
|
||||
/// Frees claims previously returned by validate (pairs with the
/// std.json.parseFromSlice call there).
pub fn validateFree(comptime P: type, allocator: std.mem.Allocator, value: P) void {
    std.json.parseFree(P, allocator, value);
}
|
||||
|
||||
/// Validates a compact-serialized token per the numbered steps of the JWT
/// spec (RFC 7519 section 7.2) and returns the base64url-decoded payload.
/// Rejects unexpected algorithms, "crit" extensions, nested ("cty") tokens
/// and JWE tokens. Caller owns (and must free) the returned slice.
pub fn validateMessage(allocator: std.mem.Allocator, comptime expectedAlg: Algorithm, tokenText: []const u8, signatureOptions: SignatureOptions) ![]const u8 {
    // 1. Verify that the JWT contains at least one period ('.')
    // character.
    // 2. Let the Encoded JOSE Header be the portion of the JWT before the
    // first period ('.') character.
    var end_of_jose_base64 = std.mem.indexOfScalar(u8, tokenText, '.') orelse return error.InvalidFormat;
    const jose_base64 = tokenText[0..end_of_jose_base64];

    // 3. Base64url decode the Encoded JOSE Header following the
    // restriction that no line breaks, whitespace, or other additional
    // characters have been used.
    var jose_json = try allocator.alloc(u8, try base64url.Decoder.calcSizeForSlice(jose_base64));
    defer allocator.free(jose_json);
    try base64url.Decoder.decode(jose_json, jose_base64);

    // 4. Verify that the resulting octet sequence is a UTF-8-encoded
    // representation of a completely valid JSON object conforming to
    // RFC 7159 [RFC7159]; let the JOSE Header be this JSON object.

    // TODO: Make sure the JSON parser confirms everything above

    var parser = std.json.Parser.init(allocator, .alloc_always);
    defer parser.deinit();

    // NOTE(review): cty_opt is never assigned after init, so this defer is
    // currently a no-op.
    var cty_opt = @as(?[]const u8, null);
    defer if (cty_opt) |cty| allocator.free(cty);

    var jwt_tree = try parser.parse(jose_json);
    defer jwt_tree.deinit();

    // 5. Verify that the resulting JOSE Header includes only parameters
    // and values whose syntax and semantics are both understood and
    // supported or that are specified as being ignored when not
    // understood.

    var jwt_root = jwt_tree.root;
    if (jwt_root != .object) return error.InvalidFormat;

    {
        var alg_val = jwt_root.object.get("alg") orelse return error.InvalidFormat;
        if (alg_val != .string) return error.InvalidFormat;
        const alg = std.meta.stringToEnum(Algorithm, alg_val.string) orelse return error.InvalidAlgorithm;

        // Make sure that the algorithm matches: https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
        if (alg != expectedAlg) return error.InvalidAlgorithm;

        // TODO: Determine if "jku"/"jwk" need to be parsed and validated

        if (jwt_root.object.get("crit")) |crit_val| {
            if (crit_val != .array) return error.InvalidFormat;
            const crit = crit_val.array;
            if (crit.items.len == 0) return error.InvalidFormat;

            // TODO: Implement or allow extensions?
            return error.UnknownExtension;
        }
    }

    // 6. Determine whether the JWT is a JWS or a JWE using any of the
    // methods described in Section 9 of [JWE].

    const jwt_type = determine_jwt_type: {
        // From Section 9 of the JWE specification:
        // > o If the object is using the JWS Compact Serialization or the JWE
        // > Compact Serialization, the number of base64url-encoded segments
        // > separated by period ('.') characters differs for JWSs and JWEs.
        // > JWSs have three segments separated by two period ('.') characters.
        // > JWEs have five segments separated by four period ('.') characters.
        switch (std.mem.count(u8, tokenText, ".")) {
            2 => break :determine_jwt_type JWTType.JWS,
            4 => break :determine_jwt_type JWTType.JWE,
            else => return error.InvalidFormat,
        }
    };

    // 7. Depending upon whether the JWT is a JWS or JWE, there are two
    // cases:
    const message_base64 = get_message: {
        switch (jwt_type) {
            // If the JWT is a JWS, follow the steps specified in [JWS] for
            // validating a JWS. Let the Message be the result of base64url
            // decoding the JWS Payload.
            .JWS => {
                var section_iter = std.mem.split(u8, tokenText, ".");
                std.debug.assert(section_iter.next() != null);
                const payload_base64 = section_iter.next().?;
                const signature_base64 = section_iter.rest();

                var signature = try allocator.alloc(u8, try base64url.Decoder.calcSizeForSlice(signature_base64));
                defer allocator.free(signature);
                try base64url.Decoder.decode(signature, signature_base64);

                // Recompute the HMAC over "<header>.<payload>" and compare.
                // NOTE(review): std.mem.eql is not constant-time; a timing
                // side channel is theoretically possible here.
                const gen_sig = &generate_signature(expectedAlg, signatureOptions.key, jose_base64, payload_base64);
                if (!std.mem.eql(u8, signature, gen_sig)) {
                    return error.InvalidSignature;
                }

                break :get_message try allocator.dupe(u8, payload_base64);
            },
            .JWE => {
                // Else, if the JWT is a JWE, follow the steps specified in
                // [JWE] for validating a JWE. Let the Message be the resulting
                // plaintext.
                return error.Unimplemented;
            },
        }
    };
    defer allocator.free(message_base64);

    // 8. If the JOSE Header contains a "cty" (content type) value of
    // "JWT", then the Message is a JWT that was the subject of nested
    // signing or encryption operations. In this case, return to Step
    // 1, using the Message as the JWT.
    if (jwt_root.object.get("cty")) |cty_val| {
        if (cty_val != .string) return error.InvalidFormat;
        return error.Unimplemented;
    }

    // 9. Otherwise, base64url decode the Message following the
    // restriction that no line breaks, whitespace, or other additional
    // characters have been used.
    var message = try allocator.alloc(u8, try base64url.Decoder.calcSizeForSlice(message_base64));
    errdefer allocator.free(message);
    try base64url.Decoder.decode(message, message_base64);

    return message;
}
|
||||
|
||||
/// Computes the HMAC over "<protectedHeaderBase64>.<payloadBase64>" with the
/// algorithm's hash and returns the raw (not base64-encoded) MAC bytes.
pub fn generate_signature(comptime algo: Algorithm, key: []const u8, protectedHeaderBase64: []const u8, payloadBase64: []const u8) [algo.CryptoFn().mac_length]u8 {
    const Hmac = algo.CryptoFn();
    var digest: [Hmac.mac_length]u8 = undefined;

    var mac = Hmac.init(key);
    mac.update(protectedHeaderBase64);
    mac.update(".");
    mac.update(payloadBase64);
    mac.final(&digest);

    return digest;
}
|
||||
|
||||
// Golden-token tests: the tokens and the base64url-encoded key below are
// fixed vectors; test_generate/test_validate decode the key before use.
test "generate jws based tokens" {
    const payload = .{
        .sub = "1234567890",
        .name = "John Doe",
        .iat = 1516239022,
    };

    try test_generate(
        .HS256,
        payload,
        "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SVT7VUK8eOve-SCacPaU_bkzT3SFr9wk5EQciofG4Qo",
        "AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow",
    );
    try test_generate(
        .HS384,
        payload,
        "eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.MSnfJgb61edr7STbvEqi4Mj3Vvmb8Kh3lsnlXacv0cDAGYhBOpNmOrhWwQgTJCKj",
        "AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow",
    );
    try test_generate(
        .HS512,
        payload,
        "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.39Xvky4dIVLaVaOW5BgbO7smTZUyvIcRtBE3i2hVW3GbjSeUFmpwRbMy94CfvgHC3KHT6V4-pnkNTotCWer-cw",
        "AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow",
    );
}

// Validates externally produced tokens against known claims.
test "validate jws based tokens" {
    const expected = TestValidatePayload{
        .iss = "joe",
        .exp = 1300819380,
        .@"http://example.com/is_root" = true,
    };

    try test_validate(
        .HS256,
        expected,
        "eyJ0eXAiOiJKV1QiLA0KICJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJqb2UiLA0KICJleHAiOjEzMDA4MTkzODAsDQogImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk",
        "AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow",
    );
    try test_validate(
        .HS384,
        expected,
        "eyJhbGciOiJIUzM4NCIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJqb2UiLCJleHAiOjEzMDA4MTkzODAsImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.2B5ucfIDtuSVRisXjPwZlqPAwgEicFIX7Gd2r8rlAbLukenHTW0Rbx1ca1VJSyLg",
        "AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow",
    );
    try test_validate(
        .HS512,
        expected,
        "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJqb2UiLCJleHAiOjEzMDA4MTkzODAsImh0dHA6Ly9leGFtcGxlLmNvbS9pc19yb290Ijp0cnVlfQ.TrGchM_jCqCTAYUQlFmXt-KOyKO0O2wYYW5fUSV8jtdgqWJ74cqNA1zc9Ix7TU4qJ-Y32rKmP9Xpu99yiShx6g",
        "AyM1SysPpbyDfgZld3umj1qzKObwVMkoqQ-EstJQLr_T-1qS0gZH75aKtMN3Yj0iPS4hcgUuTwjAzZr1Z9CAow",
    );
}

// Round-trip: encode then validate with the same key.
test "generate and then validate jws token" {
    try test_generate_then_validate(.HS256, .{ .key = "a jws hmac sha-256 test key" });
    try test_generate_then_validate(.HS384, .{ .key = "a jws hmac sha-384 test key" });
}
|
||||
|
||||
// Claims shape used by the token-generation tests.
const TestPayload = struct {
    sub: []const u8,
    name: []const u8,
    iat: i64,
};

// Decodes the base64url key, encodes `payload`, and checks the exact
// resulting token text.
fn test_generate(comptime algorithm: Algorithm, payload: TestPayload, expected: []const u8, key_base64: []const u8) !void {
    var key = try std.testing.allocator.alloc(u8, try base64url.Decoder.calcSizeForSlice(key_base64));
    defer std.testing.allocator.free(key);
    try base64url.Decoder.decode(key, key_base64);

    const token = try encode(std.testing.allocator, algorithm, payload, .{ .key = key });
    defer std.testing.allocator.free(token);

    try std.testing.expectEqualSlices(u8, expected, token);
}

// Claims shape used by the token-validation tests.
const TestValidatePayload = struct {
    iss: []const u8,
    exp: i64,
    @"http://example.com/is_root": bool,
};

// Decodes the base64url key, validates `token`, and compares every claim
// against `expected`.
fn test_validate(comptime algorithm: Algorithm, expected: TestValidatePayload, token: []const u8, key_base64: []const u8) !void {
    var key = try std.testing.allocator.alloc(u8, try base64url.Decoder.calcSizeForSlice(key_base64));
    defer std.testing.allocator.free(key);
    try base64url.Decoder.decode(key, key_base64);

    var claims = try validate(TestValidatePayload, std.testing.allocator, algorithm, token, .{ .key = key });
    defer validateFree(TestValidatePayload, std.testing.allocator, claims);

    try std.testing.expectEqualSlices(u8, expected.iss, claims.iss);
    try std.testing.expectEqual(expected.exp, claims.exp);
    try std.testing.expectEqual(expected.@"http://example.com/is_root", claims.@"http://example.com/is_root");
}

// Round-trips a fixed payload through encode + validate and checks the
// decoded claims match the input.
fn test_generate_then_validate(comptime alg: Algorithm, signatureOptions: SignatureOptions) !void {
    const Payload = struct {
        sub: []const u8,
        name: []const u8,
        iat: i64,
    };
    const payload = Payload{
        .sub = "1234567890",
        .name = "John Doe",
        .iat = 1516239022,
    };

    const token = try encode(std.testing.allocator, alg, payload, signatureOptions);
    defer std.testing.allocator.free(token);

    var decoded = try validate(Payload, std.testing.allocator, alg, token, signatureOptions);
    defer validateFree(Payload, std.testing.allocator, decoded);

    try std.testing.expectEqualSlices(u8, payload.sub, decoded.sub);
    try std.testing.expectEqualSlices(u8, payload.name, decoded.name);
    try std.testing.expectEqual(payload.iat, decoded.iat);
}
|
518
src/.deps/nanoid.zig
Normal file
518
src/.deps/nanoid.zig
Normal file
|
@ -0,0 +1,518 @@
|
|||
const std = @import("std");
|
||||
|
||||
/// A collection of useful alphabets that can be used to generate ids.
/// Each alphabet is a plain `[]const u8`; every length here fits in a `u8`,
/// as required by `computeMask` / the generators below.
pub const alphabets = struct
{
    /// Numbers from 0 to 9.
    pub const numbers = "0123456789";

    /// English hexadecimal with lowercase characters.
    pub const hexadecimal_lowercase = numbers ++ "abcdef";

    /// English hexadecimal with uppercase characters.
    pub const hexadecimal_uppercase = numbers ++ "ABCDEF";

    /// Lowercase English letters.
    pub const lowercase = "abcdefghijklmnopqrstuvwxyz";

    /// Uppercase English letters.
    pub const uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";

    /// Numbers and english letters without lookalikes: 1, l, I, 0, O, o, u, v, 5, S, s, 2, Z.
    pub const no_look_alikes = "346789ABCDEFGHJKLMNPQRTUVWXYabcdefghijkmnpqrtwxyz";

    /// Same as nolookalikes but with removed vowels and following letters: 3, 4, x, X, V.
    /// This list should protect you from accidentally getting obscene words in generated strings.
    pub const no_look_alikes_safe = "6789BCDFGHJKLMNPQRTWbcdfghjkmnpqrtwz";

    /// Combination of all the lowercase, uppercase characters and numbers from 0 to 9.
    /// Does not include any symbols or special characters.
    pub const alphanumeric = numbers ++ lowercase ++ uppercase;

    /// URL friendly characters used by the default generate procedure.
    /// 64 symbols total: `_`, `-`, 0-9, a-z, A-Z.
    pub const default = "_-" ++ alphanumeric;
};
|
||||
|
||||
/// An array of all the alphabets.
/// Built at comptime by reflecting over the public `[]const u8` constants of `alphabets`.
pub const all_alphabets = internal_utils.collectAllConstantsInStruct(alphabets, []const u8);

/// The default length for nanoids.
pub const default_id_len = 21;

/// The mask for the default alphabet length.
pub const default_mask = computeMask(alphabets.default.len);

/// This should be enough memory for an rng step buffer when generating an id of default length regardless of alphabet length.
/// It can be used for allocating your rng step buffer if you know the length of your id is `<= default_id_len`.
pub const rng_step_buffer_len_sufficient_for_default_length_ids = computeSufficientRngStepBufferLengthFor(default_id_len);

/// The maximum length of the alphabet accepted by the nanoid algorithm.
/// 255, since alphabet lengths are carried in a `u8` and must be non-zero.
pub const max_alphabet_len: u8 = std.math.maxInt(u8);
|
||||
/// Computes the mask used to turn a random byte into a candidate index into
/// an alphabet of length `alphabet_len`. The mask is of the form `2^k - 1`
/// covering `alphabet_len - 1`; masked values that still land at or beyond
/// `alphabet_len` are rejected by the generators (rejection sampling).
///
/// Parameters:
/// - `alphabet_len`: the length of the alphabet used. Must be in the range `(0, max_alphabet_len]`.
pub fn computeMask(alphabet_len: u8) u8
{
    std.debug.assert(alphabet_len > 0);

    // (len - 1) | 1 guarantees a non-zero operand so @clz is well defined for len == 1.
    const significand = @as(u31, (alphabet_len - 1) | 1);
    const leading_zeros: u5 = @clz(significand);
    const wide_mask = (@as(u32, 2) << (31 - leading_zeros)) - 1;
    return @truncate(u8, wide_mask);
}
|
||||
|
||||
/// Returns how many random bytes should be drawn per rng step when generating
/// an id of `id_len` characters from an alphabet of `alphabet_len` symbols.
///
/// The magic factor 1.6 is carried over from the reference JavaScript
/// implementation, where it was chosen by benchmarking:
/// https://github.com/ai/nanoid/blob/0454333dee4612d2c2e163d271af6cc3ce1e5aa4/index.js#L45
///
/// Parameters:
/// - `id_len`: the length of the id you will generate. Can be any value.
/// - `alphabet_len`: the length of the alphabet used. Must be in the range `(0, max_alphabet_len]`.
pub fn computeRngStepBufferLength(id_len: usize, alphabet_len: u8) usize
{
    const mask_float = @intToFloat(f64, computeMask(alphabet_len));
    const id_len_float = @intToFloat(f64, id_len);
    const alphabet_len_float = @intToFloat(f64, alphabet_len);
    return @floatToInt(usize, @ceil(1.6 * mask_float * id_len_float / alphabet_len_float));
}
|
||||
|
||||
/// Returns the largest step buffer length `computeRngStepBufferLength` can
/// yield for ids up to `max_id_len`, across every legal alphabet length.
/// Useful for sizing one buffer when the alphabet is not known up front.
///
/// Parameters:
/// - `max_id_len`: The biggest id length for which the step buffer length needs to be sufficient.
pub fn computeSufficientRngStepBufferLengthFor(max_id_len: usize) usize
{
    // Raised quota: the 255-iteration loop below may run at comptime.
    @setEvalBranchQuota(2500);

    var worst_case: usize = 0;
    var candidate: u9 = 1;
    while (candidate <= max_alphabet_len) : (candidate += 1)
    {
        const needed = computeRngStepBufferLength(max_id_len, @truncate(u8, candidate));
        if (needed > worst_case) worst_case = needed;
    }
    return worst_case;
}
|
||||
|
||||
/// Generates a nanoid inside `result_buffer` and returns it back to the caller.
///
/// Parameters:
/// - `rng`: a random number generator.
///   Provide a secure one such as `std.rand.DefaultCsprng` and seed it properly if you have security concerns.
///   See `Regarding RNGs` in `readme.md` for more information.
/// - `alphabet`: an array of the bytes that will be used in the id, its length must be in the range `(0, max_alphabet_len]`.
///   Consider the options from `nanoid.alphabets`.
/// - `result_buffer`: output buffer filled *completely* with random bytes from `alphabet`,
///   producing an id of length `result_buffer.len`; returned at the end of the function.
///   NOTE(review): assumed non-empty — an empty buffer would spin forever; confirm callers.
/// - `step_buffer`: filled with random bytes via `rng.bytes()`. Must be at least
///   `computeRngStepBufferLength(result_buffer.len, alphabet.len)` bytes.
pub fn generateEx(rng: std.rand.Random, alphabet: []const u8, result_buffer: []u8, step_buffer: []u8) []u8
{
    std.debug.assert(alphabet.len > 0 and alphabet.len <= max_alphabet_len);

    const alphabet_len = @truncate(u8, alphabet.len);
    const mask = computeMask(alphabet_len);
    const rng_step = step_buffer[0..computeRngStepBufferLength(result_buffer.len, alphabet_len)];

    var written: usize = 0;
    while (true)
    {
        // Refill the step buffer and consume it byte by byte.
        rng.bytes(rng_step);

        for (rng_step) |random_byte|
        {
            const alphabet_index = random_byte & mask;

            // Rejection sampling keeps the character distribution uniform.
            if (alphabet_index >= alphabet_len) continue;

            result_buffer[written] = alphabet[alphabet_index];

            if (written == result_buffer.len - 1) return result_buffer;
            written += 1;
        }
    }
}
|
||||
|
||||
/// Generates a nanoid inside `result_buffer` and returns it back to the caller.
///
/// This function will use `rng.int` instead of `rng.bytes` thus avoiding the need for a step buffer.
/// Depending on your choice of rng this can be useful, since you avoid the need for a step buffer,
/// but repeated calls to `rng.int` might be slower than a single call `rng.bytes`.
///
/// Parameters:
/// - `rng`: a random number generator.
///   Provide a secure one such as `std.rand.DefaultCsprng` and seed it properly if you have security concerns.
///   See `Regarding RNGs` in `readme.md` for more information.
/// - `alphabet`: an array of the bytes that will be used in the id, its length must be in the range `(0, max_alphabet_len]`.
///   Consider the options from `nanoid.alphabets`.
/// - `result_buffer`: output buffer filled *completely* with random bytes from `alphabet`,
///   producing an id of length `result_buffer.len`; returned at the end of the function.
///   Must not be empty (asserted below).
pub fn generateExWithIterativeRng(rng: std.rand.Random, alphabet: []const u8, result_buffer: []u8) []u8
{
    std.debug.assert(result_buffer.len > 0);
    std.debug.assert(alphabet.len > 0 and alphabet.len <= max_alphabet_len);

    const alphabet_len = @truncate(u8, alphabet.len);
    const mask = computeMask(alphabet_len);

    var result_iter: usize = 0;
    while (true)
    {
        const random_byte = rng.int(u8);

        // Rejection sampling: discard bytes that would index past the
        // alphabet, keeping the character distribution uniform.
        const alphabet_index = random_byte & mask;

        if (alphabet_index >= alphabet_len)
        {
            continue;
        }

        result_buffer[result_iter] = alphabet[alphabet_index];

        if (result_iter == result_buffer.len - 1)
        {
            return result_buffer;
        }
        else
        {
            result_iter += 1;
        }
    }
    // Fix: removed the trailing `return result_buffer;` that followed this
    // loop — the loop only exits via the `return` above, so it was dead code.
}
|
||||
|
||||
/// Generates a nanoid of the default length using the provided alphabet.
///
/// Parameters:
/// - `rng`: a random number generator.
///   Provide a secure one such as `std.rand.DefaultCsprng` and seed it properly if you have security concerns.
///   See `Regarding RNGs` in `README.md` for more information.
/// - `alphabet`: an array of the bytes that will be used in the id, its length must be in the range `(0, max_alphabet_len]`.
pub fn generateWithAlphabet(rng: std.rand.Random, alphabet: []const u8) [default_id_len]u8
{
    var id_buffer: [default_id_len]u8 = undefined;
    // Sized for the worst case over all alphabet lengths, so any legal alphabet works.
    var rng_step_buffer: [rng_step_buffer_len_sufficient_for_default_length_ids]u8 = undefined;
    _ = generateEx(rng, alphabet, &id_buffer, &rng_step_buffer);
    return id_buffer;
}
|
||||
|
||||
/// Generates a nanoid of the default length using the default (URL-friendly) alphabet.
///
/// Parameters:
/// - `rng`: a random number generator.
///   Provide a secure one such as `std.rand.DefaultCsprng` and seed it properly if you have security concerns.
///   See `Regarding RNGs` in `README.md` for more information.
pub fn generate(rng: std.rand.Random) [default_id_len]u8
{
    return generateWithAlphabet(rng, alphabets.default);
}
|
||||
|
||||
/// Non public utility functions used mostly in unit tests.
const internal_utils = struct
{
    /// Creates a DefaultCsprng seeded from the OS entropy source.
    fn makeDefaultCsprng() std.rand.DefaultCsprng
    {
        // Generate seed
        var seed: [std.rand.DefaultCsprng.secret_seed_length]u8 = undefined;
        std.crypto.random.bytes(&seed);

        // Initialize the rng and allocator
        var rng = std.rand.DefaultCsprng.init(seed);
        return rng;
    }

    /// Creates a deterministic PRNG (constant seed 0) for reproducible tests.
    fn makeDefaultPrngWithConstantSeed() std.rand.DefaultPrng
    {
        var rng = std.rand.DefaultPrng.init(0);
        return rng;
    }

    /// Creates a deterministic CSPRNG (seed bytes all 'a') for reproducible tests.
    fn makeDefaultCsprngWithConstantSeed() std.rand.DefaultCsprng
    {
        // Generate seed
        var seed: [std.rand.DefaultCsprng.secret_seed_length]u8 = undefined;
        for (seed) |*it| it.* = 'a';

        // Initialize the rng and allocator
        var rng = std.rand.DefaultCsprng.init(seed);
        return rng;
    }

    /// Taken from https://github.com/codeyu/nanoid-net/blob/445f4d363e0079e151ea414dab1a9f9961679e7e/test/Nanoid.Test/NanoidTest.cs#L145
    /// True when `actual` is within half of 10^-precision of `expected`.
    fn toBeCloseTo(actual: f64, expected: f64, precision: f64) bool
    {
        const pass = @fabs(expected - actual) < std.math.pow(f64, 10, -precision) / 2;
        return pass;
    }

    /// Checks if all elements in `array` are present in `includedIn`.
    fn allIn(comptime T: type, array: []const T, includedIn: []const T) bool
    {
        for (array) |it|
        {
            // Fix: search with the generic element type `T` instead of the
            // previously hard-coded `u8`, so the helper compiles for any T.
            if (std.mem.indexOfScalar(T, includedIn, it) == null)
            {
                return false;
            }
        }

        return true;
    }

    /// Returns an array with all the public constants of type `T` declared in
    /// `namespace`. Must be evaluated at comptime (uses `++` on slices).
    fn collectAllConstantsInStruct(comptime namespace: type, comptime T: type) []const T
    {
        var result: []const T = &.{};
        for (@typeInfo(namespace).Struct.decls) |decl|
        {
            if (!decl.is_pub) continue;

            const value = @field(namespace, decl.name);

            if (@TypeOf(value) == T)
            {
                result = result ++ [_]T{ value };
            }
        }
        return result;
    }
};
|
||||
|
||||
// Exhaustively sanity-checks computeMask over every legal alphabet length (1..=255):
// the mask must always be non-zero.
test "calling computeMask with all acceptable input"
{
    var i: u9 = 1;
    while (i <= max_alphabet_len) : (i += 1)
    {
        const alphabet_len = @truncate(u8, i);
        const mask = computeMask(alphabet_len);
        try std.testing.expect(mask > 0);
    }
}

// Same exhaustive sweep for the rng step buffer sizing helper at the default id length:
// the computed buffer length must always be non-zero.
test "calling computeRngStepBufferLength with all acceptable alphabet sizes and default id length"
{
    var i: u9 = 1;
    while (i <= max_alphabet_len) : (i += 1)
    {
        const alphabet_len = @truncate(u8, i);
        const rng_step_size = computeRngStepBufferLength(default_id_len, alphabet_len);
        try std.testing.expect(rng_step_size > 0);
    }
}
|
||||
|
||||
// Smoke test: a default-settings id only contains default-alphabet characters.
test "generating an id with default settings"
{
    // Init rng
    var rng = internal_utils.makeDefaultCsprng();

    // Generate a nanoid
    const result = generate(rng.random());
    try std.testing.expect(internal_utils.allIn(u8, &result, alphabets.default));
}

// generateEx honours the caller-provided buffer length (id length == buffer length).
test "generating an id with a custom length"
{
    // Init rng
    var rng = internal_utils.makeDefaultCsprng();

    // Generate a nanoid
    const custom_id_len = 10;
    const rng_step_size = comptime computeRngStepBufferLength(custom_id_len, alphabets.default.len);

    var result_buffer: [custom_id_len]u8 = undefined;
    var step_buffer: [rng_step_size]u8 = undefined;

    const result = generateEx(rng.random(), alphabets.default, &result_buffer, &step_buffer);

    try std.testing.expect(result.len == custom_id_len);
}

// Ids built from a custom alphabet only contain characters from that alphabet.
test "generating an id with a custom alphabet"
{
    // Initialize the rng
    var rng = internal_utils.makeDefaultCsprng();

    // Generate a nanoid
    const custom_alphabet = "1234abcd";
    const result = generateWithAlphabet(rng.random(), custom_alphabet);

    try std.testing.expect(internal_utils.allIn(u8, &result, custom_alphabet));
}

// Every alphabet shipped in `alphabets` produces an id drawn from itself.
test "generating an id for all alphabets"
{
    var rng = internal_utils.makeDefaultCsprng();

    for (all_alphabets) |alphabet|
    {
        const result = generateWithAlphabet(rng.random(), alphabet);

        try std.testing.expect(internal_utils.allIn(u8, &result, alphabet));
    }
}
|
||||
|
||||
// Custom alphabet and custom length combined: length is honoured and every
// output byte comes from the custom alphabet.
test "generating an id with a custom alphabet and length"
{
    // Initialize the rng
    var rng = internal_utils.makeDefaultCsprng();

    // Generate a nanoid
    const custom_alphabet = "1234abcd";
    const custom_id_len = 7;
    var result_buffer: [custom_id_len]u8 = undefined;
    var step_buffer: [computeSufficientRngStepBufferLengthFor(custom_id_len)]u8 = undefined;
    const result = generateEx(rng.random(), custom_alphabet, &result_buffer, &step_buffer);

    try std.testing.expect(result.len == custom_id_len);

    for (result) |it|
    {
        try std.testing.expect(std.mem.indexOfScalar(u8, custom_alphabet, it) != null);
    }
}

// Degenerate case: a one-character alphabet must produce a constant id.
test "generating an id with a single letter alphabet"
{
    // Initialize the rng and allocator
    var rng = internal_utils.makeDefaultCsprng();

    // Generate a nanoid
    const custom_alphabet = "a";
    const custom_id_len = 5;
    var result_buffer: [custom_id_len]u8 = undefined;
    var step_buffer: [computeSufficientRngStepBufferLengthFor(custom_id_len)]u8 = undefined;
    const result = generateEx(rng.random(), custom_alphabet, &result_buffer, &step_buffer);

    try std.testing.expect(std.mem.eql(u8, "aaaaa", result));
}
|
||||
|
||||
// Statistical smoke test: generates many ids and checks that every character
// of the default alphabet appears with roughly uniform frequency.
test "flat distribution of generated ids"
{
    // Initialize the rng and allocator
    var rng = internal_utils.makeDefaultCsprng();
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    // Generate a nanoid
    const number_of_ids_to_generate = 100 * 1000;

    var characters_counts = std.AutoArrayHashMap(u8, usize).init(gpa.allocator());
    defer characters_counts.deinit();

    // Generate ids
    var i: usize = 0;
    while (i < number_of_ids_to_generate) : (i += 1)
    {
        const id = generate(rng.random());

        // Count the occurence of every character across all generated ids
        for (id) |char|
        {
            var char_count = characters_counts.getPtr(char);
            if (char_count) |c|
            {
                c.* += 1;
            }
            else
            {
                // Fix: the first sighting of a character is itself an
                // occurrence, so record 1 (previously 0, which undercounted
                // every character by exactly one).
                try characters_counts.put(char, 1);
            }
        }
    }

    // Each character's share of all generated bytes should be close to 1/alphabet_len.
    for (characters_counts.values()) |value|
    {
        const value_f = @intToFloat(f64, value);
        const alphabet_len_f = @intToFloat(f64, alphabets.default.len);
        const count_f = @intToFloat(f64, number_of_ids_to_generate);
        const id_len_f = @intToFloat(f64, default_id_len);
        const distribution = value_f * alphabet_len_f / (count_f * id_len_f);
        try std.testing.expect(internal_utils.toBeCloseTo(distribution, 1, 1));
    }
}
|
||||
|
||||
// Same uniformity check as above, but driving the per-byte (`rng.int`) code
// path through generateExWithIterativeRng.
test "flat distribution of generated ids with the iterative method"
{
    // Initialize the rng and allocator
    var rng = internal_utils.makeDefaultCsprng();
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    // Generate a nanoid
    const number_of_ids_to_generate = 100 * 1000;

    var characters_counts = std.AutoArrayHashMap(u8, usize).init(gpa.allocator());
    defer characters_counts.deinit();

    // Generate ids
    var i: usize = 0;
    while (i < number_of_ids_to_generate) : (i += 1)
    {
        var id_buffer: [default_id_len]u8 = undefined;
        const id = generateExWithIterativeRng(rng.random(), alphabets.default, &id_buffer);

        // Count the occurence of every character across all generated ids
        for (id) |char|
        {
            var char_count = characters_counts.getPtr(char);
            if (char_count) |c|
            {
                c.* += 1;
            }
            else
            {
                // Fix: record the first sighting as 1, not 0 (off-by-one
                // undercount per character).
                try characters_counts.put(char, 1);
            }
        }
    }

    // Each character's share of all generated bytes should be close to 1/alphabet_len.
    for (characters_counts.values()) |value|
    {
        const value_f = @intToFloat(f64, value);
        const alphabet_len_f = @intToFloat(f64, alphabets.default.len);
        const count_f = @intToFloat(f64, number_of_ids_to_generate);
        const id_len_f = @intToFloat(f64, default_id_len);
        const distribution = value_f * alphabet_len_f / (count_f * id_len_f);
        try std.testing.expect(internal_utils.toBeCloseTo(distribution, 1, 1));
    }
}
|
483
src/.deps/time.zig
Normal file
483
src/.deps/time.zig
Normal file
|
@ -0,0 +1,483 @@
|
|||
const std = @import("std");
|
||||
const string = []const u8;
|
||||
const extras = @import("./extras.zig");
|
||||
const time = @This();
|
||||
|
||||
pub const DateTime = struct {
|
||||
ms: u16,
|
||||
seconds: u16,
|
||||
minutes: u16,
|
||||
hours: u16,
|
||||
days: u16,
|
||||
months: u16,
|
||||
years: u16,
|
||||
timezone: TimeZone,
|
||||
weekday: WeekDay,
|
||||
era: Era,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn initUnixMs(unix: u64) Self {
|
||||
return epoch_unix.addMs(unix);
|
||||
}
|
||||
|
||||
pub fn initUnix(unix: u64) Self {
|
||||
return epoch_unix.addSecs(unix);
|
||||
}
|
||||
|
||||
/// Caller asserts that this is > epoch
|
||||
pub fn init(year: u16, month: u16, day: u16, hr: u16, min: u16, sec: u16) Self {
|
||||
return epoch_unix
|
||||
.addYears(year - epoch_unix.years)
|
||||
.addMonths(month)
|
||||
.addDays(day)
|
||||
.addHours(hr)
|
||||
.addMins(min)
|
||||
.addSecs(sec);
|
||||
}
|
||||
|
||||
pub fn now() Self {
|
||||
return initUnixMs(@intCast(u64, std.time.milliTimestamp()));
|
||||
}
|
||||
|
||||
pub const epoch_unix = Self{
|
||||
.ms = 0,
|
||||
.seconds = 0,
|
||||
.minutes = 0,
|
||||
.hours = 0,
|
||||
.days = 0,
|
||||
.months = 0,
|
||||
.years = 1970,
|
||||
.timezone = .UTC,
|
||||
.weekday = .Thu,
|
||||
.era = .AD,
|
||||
};
|
||||
|
||||
pub fn eql(self: Self, other: Self) bool {
|
||||
return self.ms == other.ms and
|
||||
self.seconds == other.seconds and
|
||||
self.minutes == other.minutes and
|
||||
self.hours == other.hours and
|
||||
self.days == other.days and
|
||||
self.months == other.months and
|
||||
self.years == other.years and
|
||||
self.timezone == other.timezone and
|
||||
self.weekday == other.weekday;
|
||||
}
|
||||
|
||||
pub fn addMs(self: Self, count: u64) Self {
|
||||
if (count == 0) return self;
|
||||
var result = self;
|
||||
result.ms += @intCast(u16, count % 1000);
|
||||
return result.addSecs(count / 1000);
|
||||
}
|
||||
|
||||
pub fn addSecs(self: Self, count: u64) Self {
|
||||
if (count == 0) return self;
|
||||
var result = self;
|
||||
result.seconds += @intCast(u16, count % 60);
|
||||
return result.addMins(count / 60);
|
||||
}
|
||||
|
||||
pub fn addMins(self: Self, count: u64) Self {
|
||||
if (count == 0) return self;
|
||||
var result = self;
|
||||
result.minutes += @intCast(u16, count % 60);
|
||||
return result.addHours(count / 60);
|
||||
}
|
||||
|
||||
pub fn addHours(self: Self, count: u64) Self {
|
||||
if (count == 0) return self;
|
||||
var result = self;
|
||||
result.hours += @intCast(u16, count % 24);
|
||||
return result.addDays(count / 24);
|
||||
}
|
||||
|
||||
pub fn addDays(self: Self, count: u64) Self {
|
||||
if (count == 0) return self;
|
||||
var result = self;
|
||||
var input = count;
|
||||
|
||||
while (true) {
|
||||
const year_len = result.daysThisYear();
|
||||
if (input >= year_len) {
|
||||
result.years += 1;
|
||||
input -= year_len;
|
||||
result.incrementWeekday(year_len);
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
while (true) {
|
||||
const month_len = result.daysThisMonth();
|
||||
if (input >= month_len) {
|
||||
result.months += 1;
|
||||
input -= month_len;
|
||||
result.incrementWeekday(month_len);
|
||||
|
||||
if (result.months == 12) {
|
||||
result.years += 1;
|
||||
result.months = 0;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
{
|
||||
const month_len = result.daysThisMonth();
|
||||
if (result.days + input > month_len) {
|
||||
const left = month_len - result.days;
|
||||
input -= left;
|
||||
result.months += 1;
|
||||
result.days = 0;
|
||||
result.incrementWeekday(left);
|
||||
}
|
||||
result.days += @intCast(u16, input);
|
||||
result.incrementWeekday(input);
|
||||
|
||||
if (result.months == 12) {
|
||||
result.years += 1;
|
||||
result.months = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
pub fn addMonths(self: Self, count: u64) Self {
|
||||
if (count == 0) return self;
|
||||
var result = self;
|
||||
var input = count;
|
||||
while (input > 0) {
|
||||
const new = result.addDays(result.daysThisMonth());
|
||||
result = new;
|
||||
input -= 1;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
pub fn addYears(self: Self, count: u64) Self {
|
||||
if (count == 0) return self;
|
||||
return self.addMonths(count * 12);
|
||||
}
|
||||
|
||||
pub fn isLeapYear(self: Self) bool {
|
||||
return time.isLeapYear(self.years);
|
||||
}
|
||||
|
||||
pub fn daysThisYear(self: Self) u16 {
|
||||
return time.daysInYear(self.years);
|
||||
}
|
||||
|
||||
pub fn daysThisMonth(self: Self) u16 {
|
||||
return self.daysInMonth(self.months);
|
||||
}
|
||||
|
||||
fn daysInMonth(self: Self, month: u16) u16 {
|
||||
return time.daysInMonth(self.years, month);
|
||||
}
|
||||
|
||||
fn incrementWeekday(self: *Self, count: u64) void {
|
||||
var i = count % 7;
|
||||
while (i > 0) : (i -= 1) {
|
||||
self.weekday = self.weekday.next();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn dayOfThisYear(self: Self) u16 {
|
||||
var ret: u16 = 0;
|
||||
for (0..self.months) |item| {
|
||||
ret += self.daysInMonth(@intCast(u16, item));
|
||||
}
|
||||
ret += self.days;
|
||||
return ret;
|
||||
}
|
||||
|
||||
pub fn toUnix(self: Self) u64 {
|
||||
const x = self.toUnixMilli();
|
||||
return x / 1000;
|
||||
}
|
||||
|
||||
pub fn toUnixMilli(self: Self) u64 {
|
||||
var res: u64 = 0;
|
||||
res += self.ms;
|
||||
res += @as(u64, self.seconds) * std.time.ms_per_s;
|
||||
res += @as(u64, self.minutes) * std.time.ms_per_min;
|
||||
res += @as(u64, self.hours) * std.time.ms_per_hour;
|
||||
res += self.daysSinceEpoch() * std.time.ms_per_day;
|
||||
return res;
|
||||
}
|
||||
|
||||
fn daysSinceEpoch(self: Self) u64 {
|
||||
var res: u64 = 0;
|
||||
res += self.days;
|
||||
for (0..self.years - epoch_unix.years) |i| res += time.daysInYear(@intCast(u16, i));
|
||||
for (0..self.months) |i| res += self.daysInMonth(@intCast(u16, i));
|
||||
return res;
|
||||
}
|
||||
|
||||
/// fmt is based on https://momentjs.com/docs/#/displaying/format/
|
||||
pub fn format(self: Self, comptime fmt: string, options: std.fmt.FormatOptions, writer: anytype) !void {
|
||||
_ = options;
|
||||
|
||||
if (fmt.len == 0) @compileError("DateTime: format string can't be empty");
|
||||
|
||||
@setEvalBranchQuota(100000);
|
||||
|
||||
comptime var s = 0;
|
||||
comptime var e = 0;
|
||||
comptime var next: ?FormatSeq = null;
|
||||
inline for (fmt, 0..) |c, i| {
|
||||
e = i + 1;
|
||||
|
||||
if (comptime std.meta.stringToEnum(FormatSeq, fmt[s..e])) |tag| {
|
||||
next = tag;
|
||||
if (i < fmt.len - 1) continue;
|
||||
}
|
||||
|
||||
if (next) |tag| {
|
||||
switch (tag) {
|
||||
.MM => try writer.print("{:0>2}", .{self.months + 1}),
|
||||
.M => try writer.print("{}", .{self.months + 1}),
|
||||
.Mo => try printOrdinal(writer, self.months + 1),
|
||||
.MMM => try printLongName(writer, self.months, &[_]string{ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }),
|
||||
.MMMM => try printLongName(writer, self.months, &[_]string{ "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December" }),
|
||||
|
||||
.Q => try writer.print("{}", .{self.months / 3 + 1}),
|
||||
.Qo => try printOrdinal(writer, self.months / 3 + 1),
|
||||
|
||||
.D => try writer.print("{}", .{self.days + 1}),
|
||||
.Do => try printOrdinal(writer, self.days + 1),
|
||||
.DD => try writer.print("{:0>2}", .{self.days + 1}),
|
||||
|
||||
.DDD => try writer.print("{}", .{self.dayOfThisYear() + 1}),
|
||||
.DDDo => try printOrdinal(writer, self.dayOfThisYear() + 1),
|
||||
.DDDD => try writer.print("{:0>3}", .{self.dayOfThisYear() + 1}),
|
||||
|
||||
.d => try writer.print("{}", .{@enumToInt(self.weekday)}),
|
||||
.do => try printOrdinal(writer, @enumToInt(self.weekday)),
|
||||
.dd => try writer.writeAll(@tagName(self.weekday)[0..2]),
|
||||
.ddd => try writer.writeAll(@tagName(self.weekday)),
|
||||
.dddd => try printLongName(writer, @enumToInt(self.weekday), &[_]string{ "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" }),
|
||||
.e => try writer.print("{}", .{@enumToInt(self.weekday)}),
|
||||
.E => try writer.print("{}", .{@enumToInt(self.weekday) + 1}),
|
||||
|
||||
.w => try writer.print("{}", .{self.dayOfThisYear() / 7 + 1}),
|
||||
.wo => try printOrdinal(writer, self.dayOfThisYear() / 7 + 1),
|
||||
.ww => try writer.print("{:0>2}", .{self.dayOfThisYear() / 7 + 1}),
|
||||
|
||||
.Y => try writer.print("{}", .{self.years + 10000}),
|
||||
.YY => try writer.print("{:0>2}", .{self.years % 100}),
|
||||
.YYY => try writer.print("{}", .{self.years}),
|
||||
.YYYY => try writer.print("{:0>4}", .{self.years}),
|
||||
|
||||
.N => try writer.writeAll(@tagName(self.era)),
|
||||
.NN => try writer.writeAll("Anno Domini"),
|
||||
|
||||
.A => try printLongName(writer, self.hours / 12, &[_]string{ "AM", "PM" }),
|
||||
.a => try printLongName(writer, self.hours / 12, &[_]string{ "am", "pm" }),
|
||||
|
||||
.H => try writer.print("{}", .{self.hours}),
|
||||
.HH => try writer.print("{:0>2}", .{self.hours}),
|
||||
.h => try writer.print("{}", .{wrap(self.hours, 12)}),
|
||||
.hh => try writer.print("{:0>2}", .{wrap(self.hours, 12)}),
|
||||
.k => try writer.print("{}", .{wrap(self.hours, 24)}),
|
||||
.kk => try writer.print("{:0>2}", .{wrap(self.hours, 24)}),
|
||||
|
||||
.m => try writer.print("{}", .{self.minutes}),
|
||||
.mm => try writer.print("{:0>2}", .{self.minutes}),
|
||||
|
||||
.s => try writer.print("{}", .{self.seconds}),
|
||||
.ss => try writer.print("{:0>2}", .{self.seconds}),
|
||||
|
||||
.S => try writer.print("{}", .{self.ms / 100}),
|
||||
.SS => try writer.print("{:0>2}", .{self.ms / 10}),
|
||||
.SSS => try writer.print("{:0>3}", .{self.ms}),
|
||||
|
||||
.z => try writer.writeAll(@tagName(self.timezone)),
|
||||
.Z => try writer.writeAll("+00:00"),
|
||||
.ZZ => try writer.writeAll("+0000"),
|
||||
|
||||
.x => try writer.print("{}", .{self.toUnixMilli()}),
|
||||
.X => try writer.print("{}", .{self.toUnix()}),
|
||||
}
|
||||
next = null;
|
||||
s = i;
|
||||
}
|
||||
|
||||
switch (c) {
|
||||
',',
|
||||
' ',
|
||||
':',
|
||||
'-',
|
||||
'.',
|
||||
'T',
|
||||
'W',
|
||||
=> {
|
||||
try writer.writeAll(&.{c});
|
||||
s = i + 1;
|
||||
continue;
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn formatAlloc(self: Self, alloc: std.mem.Allocator, comptime fmt: string) !string {
|
||||
var list = std.ArrayList(u8).init(alloc);
|
||||
defer list.deinit();
|
||||
|
||||
try self.format(fmt, .{}, list.writer());
|
||||
return list.toOwnedSlice();
|
||||
}
|
||||
|
||||
/// Format tokens recognized by `format` (Moment.js-style pattern
/// syntax). The comment on each tag shows sample outputs for that token.
const FormatSeq = enum {
    M, // 1 2 ... 11 12
    Mo, // 1st 2nd ... 11th 12th
    MM, // 01 02 ... 11 12
    MMM, // Jan Feb ... Nov Dec
    MMMM, // January February ... November December
    Q, // 1 2 3 4
    Qo, // 1st 2nd 3rd 4th
    D, // 1 2 ... 30 31
    Do, // 1st 2nd ... 30th 31st
    DD, // 01 02 ... 30 31
    DDD, // 1 2 ... 364 365
    DDDo, // 1st 2nd ... 364th 365th
    DDDD, // 001 002 ... 364 365
    d, // 0 1 ... 5 6
    do, // 0th 1st ... 5th 6th
    dd, // Su Mo ... Fr Sa
    ddd, // Sun Mon ... Fri Sat
    dddd, // Sunday Monday ... Friday Saturday
    e, // 0 1 ... 5 6 (locale)
    E, // 1 2 ... 6 7 (ISO)
    w, // 1 2 ... 52 53
    wo, // 1st 2nd ... 52nd 53rd
    ww, // 01 02 ... 52 53
    Y, // 11970 11971 ... 19999 20000 20001 (Holocene calendar)
    YY, // 70 71 ... 29 30
    YYY, // 1 2 ... 1970 1971 ... 2029 2030
    YYYY, // 0001 0002 ... 1970 1971 ... 2029 2030
    N, // BC AD
    NN, // Before Christ ... Anno Domini
    A, // AM PM
    a, // am pm
    H, // 0 1 ... 22 23
    HH, // 00 01 ... 22 23
    h, // 1 2 ... 11 12
    hh, // 01 02 ... 11 12
    k, // 1 2 ... 23 24
    kk, // 01 02 ... 23 24
    m, // 0 1 ... 58 59
    mm, // 00 01 ... 58 59
    s, // 0 1 ... 58 59
    ss, // 00 01 ... 58 59
    S, // 0 1 ... 8 9 (second fraction)
    SS, // 00 01 ... 98 99
    SSS, // 000 001 ... 998 999
    z, // EST CST ... MST PST
    Z, // -07:00 -06:00 ... +06:00 +07:00
    ZZ, // -0700 -0600 ... +0600 +0700
    x, // unix milli
    X, // unix
};
|
||||
|
||||
/// Returns the elapsed duration from `other_in_the_past` up to `self`.
/// NOTE(review): `Duration.ms` is unsigned, so passing an instant later
/// than `self` would underflow — callers must honor the parameter name.
pub fn since(self: Self, other_in_the_past: Self) Duration {
    const elapsed_ms = self.toUnixMilli() - other_in_the_past.toUnixMilli();
    return .{ .ms = elapsed_ms };
}
|
||||
};
|
||||
|
||||
/// Locale long-format aliases (Moment.js-style: LT, LL, LLLL, ...).
/// NOTE(review): every entry is an empty string — these appear to be
/// unimplemented placeholders; confirm before relying on them.
pub const format = struct {
    pub const LT = "";
    pub const LTS = "";
    pub const L = "";
    pub const l = "";
    pub const LL = "";
    pub const ll = "";
    pub const LLL = "";
    pub const lll = "";
    pub const LLLL = "";
    pub const llll = "";
};
|
||||
|
||||
/// Supported time zones. Only UTC is currently defined.
pub const TimeZone = enum {
    UTC,

    // Serializes as the tag name ("UTC") when emitting JSON.
    usingnamespace extras.TagNameJsonStringifyMixin(@This());
};
|
||||
|
||||
/// Day of the week, Sunday-first (matches the `dd` format token's
/// "Su Mo ... Fr Sa" ordering where Sunday is index 0).
pub const WeekDay = enum {
    Sun,
    Mon,
    Tue,
    Wed,
    Thu,
    Fri,
    Sat,

    /// Returns the day following `self`, wrapping Sat -> Sun.
    pub fn next(self: WeekDay) WeekDay {
        return switch (self) {
            .Sun => .Mon,
            .Mon => .Tue,
            .Tue => .Wed,
            .Wed => .Thu,
            .Thu => .Fri,
            .Fri => .Sat,
            .Sat => .Sun,
        };
    }

    // Serializes as the tag name (e.g. "Mon") when emitting JSON.
    usingnamespace extras.TagNameJsonStringifyMixin(@This());
};
|
||||
|
||||
/// Calendar era tag. BC is commented out upstream — NOTE(review):
/// presumably because the epoch-based representation cannot express
/// pre-epoch dates; confirm before enabling it.
pub const Era = enum {
    // BC,
    AD,

    // Serializes as the tag name ("AD") when emitting JSON.
    usingnamespace extras.TagNameJsonStringifyMixin(@This());
};
|
||||
|
||||
/// Gregorian leap-year test: divisible by 4, except century years,
/// which are leap only when also divisible by 400.
pub fn isLeapYear(year: u16) bool {
    if (year % 400 == 0) return true;
    if (year % 100 == 0) return false;
    return year % 4 == 0;
}
|
||||
|
||||
/// Total number of days in `year`: 366 for leap years, otherwise 365.
pub fn daysInYear(year: u16) u16 {
    if (isLeapYear(year)) return 366;
    return 365;
}
|
||||
|
||||
/// Number of days in `month` of `year`, where `month` is a zero-based
/// index (the table is indexed directly, so 0 = January). February gets
/// 29 days in leap years.
fn daysInMonth(year: u16, month: u16) u16 {
    const february: u16 = if (isLeapYear(year)) 29 else 28;
    const lengths = [12]u16{ 31, february, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
    return lengths[month];
}
|
||||
|
||||
/// Writes `num` followed by its English ordinal suffix ("1st", "2nd",
/// "3rd", "4th", ...).
///
/// Fix: the previous version only special-cased exactly 1/2/3, producing
/// "21th", "31th", "364th" — contradicting the documented `Do`/`DDDo`
/// outputs ("30th 31st", "364th 365th"). English ordinals depend on the
/// last digit, except the teens (11–13, 111–113, ...) which always take
/// "th".
fn printOrdinal(writer: anytype, num: u16) !void {
    try writer.print("{}", .{num});
    const tens = num % 100;
    const suffix: []const u8 = if (tens >= 11 and tens <= 13)
        "th"
    else switch (num % 10) {
        1 => "st",
        2 => "nd",
        3 => "rd",
        else => "th",
    };
    try writer.writeAll(suffix);
}
|
||||
|
||||
/// Writes `names[index]` to `writer`; used for name tables such as
/// AM/PM. Out-of-bounds `index` trips the safety-checked panic of slice
/// indexing (UB in ReleaseFast).
fn printLongName(writer: anytype, index: u16, names: []const string) !void {
    try writer.writeAll(names[index]);
}
|
||||
|
||||
/// Maps `val` into the range [1, at], with an exact multiple (remainder
/// zero) mapped to `at` itself — e.g. hour 0 renders as 12 on a 12-hour
/// clock and as 24 with the `k`/`kk` tokens.
fn wrap(val: u16, at: u16) u16 {
    const rem = val % at;
    if (rem == 0) return at;
    return rem;
}
|
||||
|
||||
/// Elapsed time span between two instants, stored as an unsigned
/// millisecond count (see `since`).
pub const Duration = struct {
    ms: u64,
};
|
30
src/.deps/zig-sqlite/.builds/alpine-edge-amd64.yml
Normal file
30
src/.deps/zig-sqlite/.builds/alpine-edge-amd64.yml
Normal file
|
@ -0,0 +1,30 @@
|
|||
image: alpine/edge
|
||||
arch: x86_64
|
||||
packages:
|
||||
- curl
|
||||
- jq
|
||||
- sqlite-dev
|
||||
- xz
|
||||
- qemu-aarch64
|
||||
- qemu-i386
|
||||
- qemu-riscv64
|
||||
- qemu-mips
|
||||
- qemu-arm
|
||||
sources:
|
||||
- https://github.com/vrischmann/zig-sqlite
|
||||
|
||||
tasks:
|
||||
- install_zig: |
|
||||
curl -L -s "https://ziglang.org/download/index.json" | jq '.master["x86_64-linux"].tarball' -r >> ~/zig_master_url
|
||||
curl -J -o ~/zig.tar.xz $(cat ~/zig_master_url)
|
||||
tar xJf ~/zig.tar.xz
|
||||
mv ~/zig-linux-* ~/zig-master
|
||||
echo "export PATH=$PATH:~/zig-master" >> ~/.buildenv
|
||||
|
||||
- test_in_memory: |
|
||||
cd zig-sqlite
|
||||
TERM=dumb zig build test -Din_memory=true
|
||||
|
||||
- test_in_memory_with_qemu: |
|
||||
cd zig-sqlite
|
||||
TERM=dumb zig build test -Din_memory=true -Denable_qemu=true
|
21
src/.deps/zig-sqlite/.builds/debian-stable-aarch64.yml
Normal file
21
src/.deps/zig-sqlite/.builds/debian-stable-aarch64.yml
Normal file
|
@ -0,0 +1,21 @@
|
|||
image: debian/stable
|
||||
arch: arm64
|
||||
packages:
|
||||
- curl
|
||||
- jq
|
||||
- libsqlite3-dev
|
||||
sources:
|
||||
- https://github.com/vrischmann/zig-sqlite
|
||||
|
||||
tasks:
|
||||
- install_zig: |
|
||||
curl -L -s "https://ziglang.org/download/index.json" | jq '.master["aarch64-linux"].tarball' -r >> ~/zig_master_url
|
||||
curl -J -o ~/zig.tar.xz $(cat ~/zig_master_url)
|
||||
tar xJf ~/zig.tar.xz
|
||||
mv ~/zig-linux-* ~/zig-master
|
||||
echo "export PATH=$PATH:~/zig-master" >> ~/.buildenv
|
||||
|
||||
- test_in_memory: |
|
||||
cd zig-sqlite
|
||||
TERM=dumb zig build test -Din_memory=true
|
||||
|
20
src/.deps/zig-sqlite/.builds/freebsd-latest-amd64.yml
Normal file
20
src/.deps/zig-sqlite/.builds/freebsd-latest-amd64.yml
Normal file
|
@ -0,0 +1,20 @@
|
|||
image: freebsd/latest
|
||||
arch: amd64
|
||||
packages:
|
||||
- curl
|
||||
- jq
|
||||
- sqlite3
|
||||
sources:
|
||||
- https://github.com/vrischmann/zig-sqlite
|
||||
|
||||
tasks:
|
||||
- install_zig: |
|
||||
curl -L -s "https://ziglang.org/download/index.json" | jq '.master["x86_64-freebsd"].tarball' -r >> ~/zig_master_url
|
||||
curl -J -o ~/zig.tar.xz $(cat ~/zig_master_url)
|
||||
tar xJf ~/zig.tar.xz
|
||||
mv ~/zig-freebsd-* ~/zig-master
|
||||
echo "export PATH=$PATH:~/zig-master" >> ~/.buildenv
|
||||
|
||||
- test_in_memory: |
|
||||
cd zig-sqlite
|
||||
TERM=dumb zig build test -Din_memory=true
|
1
src/.deps/zig-sqlite/.dockerignore
Normal file
1
src/.deps/zig-sqlite/.dockerignore
Normal file
|
@ -0,0 +1 @@
|
|||
/zig-cache
|
7
src/.deps/zig-sqlite/.gitignore
vendored
Normal file
7
src/.deps/zig-sqlite/.gitignore
vendored
Normal file
|
@ -0,0 +1,7 @@
|
|||
/build_runner.zig
|
||||
zig-cache
|
||||
zig-out
|
||||
.zigmod
|
||||
deps.zig
|
||||
core.*
|
||||
/fuzz/outputs
|
21
src/.deps/zig-sqlite/LICENSE
Normal file
21
src/.deps/zig-sqlite/LICENSE
Normal file
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2020 Vincent Rischmann <vincent@rischmann.fr>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
684
src/.deps/zig-sqlite/README.md
Normal file
684
src/.deps/zig-sqlite/README.md
Normal file
|
@ -0,0 +1,684 @@
|
|||
# zig-sqlite
|
||||
|
||||
This package is a thin wrapper around [sqlite](https://sqlite.org/index.html)'s C API.
|
||||
|
||||
_Maintainer note_: I'm currently on a break working with Zig and don't intend to work on new features for zig-sqlite.
|
||||
I will keep it updated for the latest Zig versions because that doesn't take too much of my time.
|
||||
|
||||
# Status
|
||||
|
||||
While the core functionality works right now, the API is still subject to changes.
|
||||
|
||||
If you use this library, expect to have to make changes when you update the code.
|
||||
|
||||
# Zig release support
|
||||
|
||||
`zig-sqlite` only supports Zig master (as can be found [here](https://ziglang.org/download/)). The plan is to support releases once Zig 1.0 is released but this can still change.
|
||||
|
||||
The Zig self-hosted/stage2 compiler is now [the default](https://github.com/ziglang/zig/pull/12368) however currently it can't be used with `zig-sqlite` due to bugs.
|
||||
|
||||
Eventually `zig-sqlite` will only support stage2 but until a point I feel comfortable doing that, the `master` branch will stay compatible with stage1 and all work for stage2 will happen in the `stage2` branch.
|
||||
|
||||
# Table of contents
|
||||
|
||||
* [Status](#status)
|
||||
* [Requirements](#requirements)
|
||||
* [Features](#features)
|
||||
* [Installation](#installation)
|
||||
* [zigmod](#zigmod)
|
||||
* [Git submodule](#git-submodule)
|
||||
* [Using the system sqlite library](#using-the-system-sqlite-library)
|
||||
* [Using the bundled sqlite source code file](#using-the-bundled-sqlite-source-code-file)
|
||||
* [Usage](#usage)
|
||||
* [Initialization](#initialization)
|
||||
* [Preparing a statement](#preparing-a-statement)
|
||||
* [Common use](#common-use)
|
||||
* [Diagnostics](#diagnostics)
|
||||
* [Executing a statement](#executing-a-statement)
|
||||
* [Reuse a statement](#reuse-a-statement)
|
||||
* [Reading data](#reading-data)
|
||||
* [Type parameter](#type-parameter)
|
||||
* [Non allocating](#non-allocating)
|
||||
* [Allocating](#allocating)
|
||||
* [Iterating](#iterating)
|
||||
* [Non allocating](#non-allocating-1)
|
||||
* [Allocating](#allocating-1)
|
||||
* [Bind parameters and resultset rows](#bind-parameters-and-resultset-rows)
|
||||
* [Custom type binding and reading](#custom-type-binding-and-reading)
|
||||
* [Note about complex allocations](#note-about-complex-allocations)
|
||||
* [Comptime checks](#comptime-checks)
|
||||
* [Check the number of bind parameters.](#check-the-number-of-bind-parameters)
|
||||
* [Assign types to bind markers and check them.](#assign-types-to-bind-markers-and-check-them)
|
||||
* [User defined SQL functions](#user-defined-sql-functions)
|
||||
* [Scalar functions](#scalar-functions)
|
||||
* [Aggregate functions](#aggregate-functions)
|
||||
|
||||
# Requirements
|
||||
|
||||
[Zig master](https://ziglang.org/download/) is the only required dependency.
|
||||
|
||||
For sqlite, you have options depending on your target:
|
||||
* On Windows the only supported way at the moment to build `zig-sqlite` is with the bundled sqlite source code file.
|
||||
* On Linux we have two options:
|
||||
* use the system and development package for sqlite (`libsqlite3-dev` for Debian and derivatives, `sqlite3-devel` for Fedora)
|
||||
* use the bundled sqlite source code file.
|
||||
|
||||
# Features
|
||||
|
||||
* Preparing, executing statements
|
||||
* comptime checked bind parameters
|
||||
* user defined SQL functions
|
||||
|
||||
# Installation
|
||||
|
||||
There are two primary ways to include `zig-sqlite` in your project:
|
||||
* using the [zigmod](https://github.com/nektro/zigmod) package manager
|
||||
* using a git submodule
|
||||
|
||||
## zigmod
|
||||
|
||||
Add this to your `zig.mod` file:
|
||||
```
|
||||
dependencies:
|
||||
- src: git https://github.com/vrischmann/zig-sqlite branch-master
|
||||
```
|
||||
|
||||
Note that if you're building an executable and not a library you should use `dev_dependencies` instead.
|
||||
|
||||
Next run `zigmod fetch`; it should create a `deps.zig` file.
|
||||
|
||||
Now in your `build.zig` you can access the package like this:
|
||||
```zig
|
||||
const deps = @import("deps.zig");
|
||||
...
|
||||
deps.addAllTo(exe);
|
||||
```
|
||||
|
||||
This is the easiest way to add `zig-sqlite` because it uses the bundled source code, avoiding all sorts of linking problems.
|
||||
|
||||
## Git submodule
|
||||
|
||||
If you don't want to use a package manager you can simply add this repository as a git submodule.
|
||||
|
||||
Then you need to choose if you want to use the system sqlite library or the bundled source code.
|
||||
|
||||
## Using the system sqlite library
|
||||
|
||||
If you want to use the system sqlite library, add the following to your `build.zig` target(s):
|
||||
|
||||
```zig
|
||||
exe.linkLibC();
|
||||
exe.linkSystemLibrary("sqlite3");
|
||||
exe.addPackage(.{ .name = "sqlite", .path = "third_party/zig-sqlite/sqlite.zig" });
|
||||
```
|
||||
|
||||
## Using the bundled sqlite source code file
|
||||
|
||||
If you want to use the bundled sqlite source code file, first you need to add it as a static library in your `build.zig` file:
|
||||
|
||||
```zig
|
||||
const sqlite = b.addStaticLibrary(.{ .name = "sqlite", .target = target, .optimize = .ReleaseSafe });
|
||||
sqlite.addCSourceFile("third_party/zig-sqlite/c/sqlite3.c", &[_][]const u8{"-std=c99"});
|
||||
sqlite.linkLibC();
|
||||
```
|
||||
|
||||
If you need to define custom [compile-time options](https://www.sqlite.org/compile.html#overview) for sqlite, modify the flags (second argument to `addCSourceFile`).
|
||||
|
||||
Now it's just a matter of linking your `build.zig` target(s) to this library instead of the system one:
|
||||
|
||||
```zig
|
||||
exe.linkLibrary(sqlite);
|
||||
exe.addModule("sqlite", b.createModule(.{ .source_file = .{ .path = "third_party/zig-sqlite/sqlite.zig", } }));
|
||||
exe.addIncludePath("third_party/zig-sqlite/c");
|
||||
```
|
||||
|
||||
If you're building with glibc you must make sure that the version used is at least 2.28.
|
||||
|
||||
You can do that in your `build.zig` file:
|
||||
```zig
|
||||
var target = b.standardTargetOptions(.{});
|
||||
target.setGnuLibCVersion(2, 28, 0);
|
||||
exe.setTarget(target);
|
||||
```
|
||||
|
||||
Or with `-Dtarget`:
|
||||
```
|
||||
$ zig build -Dtarget=native-linux-gnu.2.28
|
||||
```
|
||||
|
||||
# Usage
|
||||
|
||||
Import `zig-sqlite` like this:
|
||||
|
||||
```zig
|
||||
const sqlite = @import("sqlite");
|
||||
```
|
||||
|
||||
## Initialization
|
||||
|
||||
You must create and initialize an instance of `sqlite.Db`:
|
||||
|
||||
```zig
|
||||
var db = try sqlite.Db.init(.{
|
||||
.mode = sqlite.Db.Mode{ .File = "/home/vincent/mydata.db" },
|
||||
.open_flags = .{
|
||||
.write = true,
|
||||
.create = true,
|
||||
},
|
||||
.threading_mode = .MultiThread,
|
||||
});
|
||||
```
|
||||
|
||||
The `init` method takes an `InitOptions` struct which will be used to configure sqlite.
|
||||
|
||||
Only the `mode` field is mandatory, the other fields have sane default values.
|
||||
|
||||
## Preparing a statement
|
||||
|
||||
### Common use
|
||||
|
||||
sqlite works exclusively by using prepared statements. The wrapper type is `sqlite.Statement`. Here is how you get one:
|
||||
|
||||
```zig
|
||||
const query =
|
||||
\\SELECT id, name, age, salary FROM employees WHERE age > ? AND age < ?
|
||||
;
|
||||
|
||||
var stmt = try db.prepare(query);
|
||||
defer stmt.deinit();
|
||||
```
|
||||
|
||||
The `Db.prepare` method takes a `comptime` query string.
|
||||
|
||||
### Diagnostics
|
||||
|
||||
If you want failure diagnostics you can use `prepareWithDiags` like this:
|
||||
|
||||
```zig
|
||||
var diags = sqlite.Diagnostics{};
|
||||
var stmt = db.prepareWithDiags(query, .{ .diags = &diags }) catch |err| {
|
||||
std.log.err("unable to prepare statement, got error {}. diagnostics: {s}", .{ err, diags });
|
||||
return err;
|
||||
};
|
||||
defer stmt.deinit();
|
||||
```
|
||||
|
||||
## Executing a statement
|
||||
|
||||
For queries which do not return data (`INSERT`, `UPDATE`) you can use the `exec` method:
|
||||
|
||||
```zig
|
||||
const query =
|
||||
\\UPDATE foo SET salary = ? WHERE id = ?
|
||||
;
|
||||
|
||||
var stmt = try db.prepare(query);
|
||||
defer stmt.deinit();
|
||||
|
||||
try stmt.exec(.{
|
||||
.salary = 20000,
|
||||
.id = 40,
|
||||
});
|
||||
```
|
||||
|
||||
See the section "Bind parameters and resultset rows" for more information on the types mapping rules.
|
||||
|
||||
## Reuse a statement
|
||||
|
||||
You can reuse a statement by resetting it like this:
|
||||
```zig
|
||||
const query =
|
||||
\\UPDATE foo SET salary = ? WHERE id = ?
|
||||
;
|
||||
|
||||
var stmt = try db.prepare(query);
|
||||
defer stmt.deinit();
|
||||
|
||||
var id: usize = 0;
|
||||
while (id < 20) : (id += 1) {
|
||||
stmt.reset();
|
||||
try stmt.exec(.{
|
||||
.salary = 2000,
|
||||
.id = id,
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
## Reading data
|
||||
|
||||
For queries which return data you have multiple options:
|
||||
* `Statement.all` which takes an allocator and can allocate memory.
|
||||
* `Statement.one` which does not take an allocator and cannot allocate memory (aside from what sqlite allocates itself).
|
||||
* `Statement.oneAlloc` which takes an allocator and can allocate memory.
|
||||
|
||||
### Type parameter
|
||||
|
||||
All these methods take a type as first parameter.
|
||||
|
||||
The type represents a "row", it can be:
|
||||
* a struct where each field maps to the corresponding column in the resultset (so field 0 must map to column 1 and so on).
|
||||
* a single type, in that case the resultset must only return one column.
|
||||
|
||||
The type can be a pointer but only when using the methods taking an allocator.
|
||||
|
||||
Not all types are allowed, see the section "Bind parameters and resultset rows" for more information on the types mapping rules.
|
||||
|
||||
### Non allocating
|
||||
|
||||
Using `one`:
|
||||
|
||||
```zig
|
||||
const query =
|
||||
\\SELECT name, age FROM employees WHERE id = ?
|
||||
;
|
||||
|
||||
var stmt = try db.prepare(query);
|
||||
defer stmt.deinit();
|
||||
|
||||
const row = try stmt.one(
|
||||
struct {
|
||||
name: [128:0]u8,
|
||||
age: usize,
|
||||
},
|
||||
.{},
|
||||
.{ .id = 20 },
|
||||
);
|
||||
if (row) |row| {
|
||||
std.log.debug("name: {}, age: {}", .{std.mem.spanZ(&row.name), row.age});
|
||||
}
|
||||
```
|
||||
Notice that to read text we need to use a 0-terminated array; if the `name` column is bigger than 127 bytes the call to `one` will fail.
|
||||
|
||||
If the length of the data is variable then the sentinel is mandatory: without one there would be no way to know where the data ends in the array.
|
||||
|
||||
However if the length is fixed, you can read into a non 0-terminated array, for example:
|
||||
|
||||
```zig
|
||||
const query =
|
||||
\\SELECT id FROM employees WHERE name = ?
|
||||
;
|
||||
|
||||
var stmt = try db.prepare(query);
|
||||
defer stmt.deinit();
|
||||
|
||||
const row = try stmt.one(
|
||||
[16]u8,
|
||||
.{},
|
||||
.{ .name = "Vincent" },
|
||||
);
|
||||
if (row) |id| {
|
||||
std.log.debug("id: {s}", .{std.fmt.fmtSliceHexLower(&id)});
|
||||
}
|
||||
```
|
||||
|
||||
If the column data doesn't have the correct length a `error.ArraySizeMismatch` will be returned.
|
||||
|
||||
The convenience function `sqlite.Db.one` works exactly the same way:
|
||||
|
||||
```zig
|
||||
const query =
|
||||
\\SELECT age FROM employees WHERE id = ?
|
||||
;
|
||||
|
||||
const row = try db.one(usize, query, .{}, .{ .id = 20 });
|
||||
if (row) |age| {
|
||||
std.log.debug("age: {}", .{age});
|
||||
}
|
||||
```
|
||||
|
||||
### Allocating
|
||||
|
||||
Using `all`:
|
||||
|
||||
```zig
|
||||
const query =
|
||||
\\SELECT name FROM employees WHERE age > ? AND age < ?
|
||||
;
|
||||
|
||||
var stmt = try db.prepare(query);
|
||||
defer stmt.deinit();
|
||||
|
||||
const names = try stmt.all([]const u8, allocator, .{}, .{
|
||||
.age1 = 20,
|
||||
.age2 = 40,
|
||||
});
|
||||
for (names) |name| {
|
||||
std.log.debug("name: {s}", .{ name });
|
||||
}
|
||||
```
|
||||
|
||||
Using `oneAlloc`:
|
||||
|
||||
```zig
|
||||
const query =
|
||||
\\SELECT name FROM employees WHERE id = ?
|
||||
;
|
||||
|
||||
var stmt = try db.prepare(query);
|
||||
defer stmt.deinit();
|
||||
|
||||
const row = try stmt.oneAlloc([]const u8, allocator, .{}, .{
|
||||
.id = 200,
|
||||
});
|
||||
if (row) |name| {
|
||||
std.log.debug("name: {}", .{name});
|
||||
}
|
||||
```
|
||||
|
||||
## Iterating
|
||||
|
||||
Another way to get the data returned by a query is to use the `sqlite.Iterator` type.
|
||||
|
||||
You can only get one by calling the `iterator` method on a statement.
|
||||
|
||||
The `iterator` method takes a type which is the same as with `all`, `one` or `oneAlloc`: every row retrieved by calling `next` or `nextAlloc` will have this type.
|
||||
|
||||
Iterating is done by calling the `next` or `nextAlloc` method on an iterator. Just like before, `next` cannot allocate memory while `nextAlloc` can allocate memory.
|
||||
|
||||
`next` or `nextAlloc` will either return an optional value or an error; you should keep iterating until `null` is returned.
|
||||
|
||||
### Non allocating
|
||||
|
||||
```zig
|
||||
var stmt = try db.prepare("SELECT age FROM user WHERE age < ?");
|
||||
defer stmt.deinit();
|
||||
|
||||
var iter = try stmt.iterator(usize, .{
|
||||
.age = 20,
|
||||
});
|
||||
|
||||
while (try iter.next(.{})) |age| {
|
||||
std.debug.print("age: {}\n", .{age});
|
||||
}
|
||||
```
|
||||
|
||||
### Allocating
|
||||
|
||||
```zig
|
||||
var stmt = try db.prepare("SELECT name FROM user WHERE age < ?");
|
||||
defer stmt.deinit();
|
||||
|
||||
var iter = try stmt.iterator([]const u8, .{
|
||||
.age = 20,
|
||||
});
|
||||
|
||||
while (true) {
|
||||
var arena = std.heap.ArenaAllocator.init(allocator);
|
||||
defer arena.deinit();
|
||||
|
||||
const name = (try iter.nextAlloc(arena.allocator(), .{})) orelse break;
|
||||
std.debug.print("name: {}\n", .{name});
|
||||
}
|
||||
```
|
||||
|
||||
## Bind parameters and resultset rows
|
||||
|
||||
Since sqlite doesn't have many [types](https://www.sqlite.org/datatype3.html) only a small number of Zig types are allowed in binding parameters and in resultset mapping types.
|
||||
|
||||
Here are the rules for bind parameters:
|
||||
* any Zig `Int` or `ComptimeInt` is treated as a `INTEGER`.
|
||||
* any Zig `Float` or `ComptimeFloat` is treated as a `REAL`.
|
||||
* `[]const u8`, `[]u8` is treated as a `TEXT`.
|
||||
* the custom `sqlite.Blob` type is treated as a `BLOB`.
|
||||
* the custom `sqlite.Text` type is treated as a `TEXT`.
|
||||
* the `null` value is treated as a `NULL`.
|
||||
* non-null optionals are treated like a regular value, null optionals are treated as a `NULL`.
|
||||
|
||||
Here are the rules for resultset rows:
|
||||
* `INTEGER` can be read into any Zig `Int` provided the data fits.
|
||||
* `REAL` can be read into any Zig `Float` provided the data fits.
|
||||
* `TEXT` can be read into a `[]const u8` or `[]u8`.
|
||||
* `TEXT` can be read into any array of `u8` with a sentinel provided the data fits.
|
||||
* `BLOB` follows the same rules as `TEXT`.
|
||||
* `NULL` can be read into any optional.
|
||||
|
||||
Note that arrays must have a sentinel because we need a way to communicate where the data actually stops in the array, so for example use `[200:0]u8` for a `TEXT` field.
|
||||
|
||||
## Custom type binding and reading
|
||||
|
||||
Sometimes the default field binding or reading logic is not what you want, for example if you want to store an enum using its tag name instead of its integer value or
|
||||
if you want to store a byte slice as an hex string.
|
||||
|
||||
To accomplish this you must first define a wrapper struct for your type. For example if your type is a `[4]u8` and you want to treat it as an integer:
|
||||
```zig
|
||||
pub const MyArray = struct {
|
||||
data: [4]u8,
|
||||
|
||||
pub const BaseType = u32;
|
||||
|
||||
pub fn bindField(self: MyArray, _: std.mem.Allocator) !BaseType {
|
||||
return std.mem.readIntNative(BaseType, &self.data);
|
||||
}
|
||||
|
||||
pub fn readField(_: std.mem.Allocator, value: BaseType) !MyArray {
|
||||
var arr: MyArray = undefined;
|
||||
std.mem.writeIntNative(BaseType, &arr.data, value);
|
||||
return arr;
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
Now when you bind a value of type `MyArray` the value returned by `bindField` will be used for binding instead.
|
||||
|
||||
Same for reading, when you select _into_ a `MyArray` row or field the value returned by `readField` will be used instead.
|
||||
|
||||
_NOTE_: when you _do_ allocate in `bindField` or `readField` make sure to pass a `std.heap.ArenaAllocator`-based allocator.
|
||||
|
||||
The binding or reading code does not keep track of allocations made in custom types, so it can't free the allocated data itself; it's therefore required
|
||||
to use an arena to prevent memory leaks.
|
||||
|
||||
## Note about complex allocations
|
||||
|
||||
Depending on your queries and types there can be a lot of allocations required. Take the following example:
|
||||
```zig
|
||||
const User = struct {
|
||||
id: usize,
|
||||
first_name: []const u8,
|
||||
last_name: []const u8,
|
||||
data: []const u8,
|
||||
};
|
||||
|
||||
fn fetchUsers(allocator: std.mem.Allocator, db: *sqlite.Db) ![]User {
|
||||
var stmt = try db.prepare("SELECT id FROM user WHERE id > $id");
|
||||
defer stmt.deinit();
|
||||
|
||||
return stmt.all(User, allocator, .{}, .{ .id = 20 });
|
||||
}
|
||||
```
|
||||
|
||||
This will do multiple allocations:
|
||||
* one for each id field in the `User` type
|
||||
* one for the resulting slice
|
||||
|
||||
To facilitate memory handling, consider using an arena allocator like this:
|
||||
```zig
|
||||
var arena = std.heap.ArenaAllocator.init(allocator);
|
||||
defer arena.deinit();
|
||||
|
||||
const users = try fetchUsers(arena.allocator(), db);
|
||||
_ = users;
|
||||
```
|
||||
|
||||
This is especially recommended if you use custom types that allocate memory since, as noted above, it's necessary to prevent memory leaks.
|
||||
|
||||
# Comptime checks
|
||||
|
||||
Prepared statements contain _comptime_ metadata which is used to validate every call to `exec`, `one` and `all` _at compile time_.
|
||||
|
||||
## Check the number of bind parameters.
|
||||
|
||||
The first check makes sure you provide the same number of bind parameters as there are bind markers in the query string.
|
||||
|
||||
Take the following code:
|
||||
```zig
|
||||
var stmt = try db.prepare("SELECT id FROM user WHERE age > ? AND age < ? AND weight > ?");
|
||||
defer stmt.deinit();
|
||||
|
||||
const rows = try stmt.all(usize, .{}, .{
|
||||
.age_1 = 10,
|
||||
.age_2 = 20,
|
||||
});
|
||||
_ = rows;
|
||||
```
|
||||
It fails with this compilation error:
|
||||
```
|
||||
/home/vincent/dev/perso/libs/zig-sqlite/sqlite.zig:738:17: error: number of bind markers not equal to number of fields
|
||||
@compileError("number of bind markers not equal to number of fields");
|
||||
^
|
||||
/home/vincent/dev/perso/libs/zig-sqlite/sqlite.zig:817:22: note: called from here
|
||||
self.bind(values);
|
||||
^
|
||||
/home/vincent/dev/perso/libs/zig-sqlite/sqlite.zig:905:41: note: called from here
|
||||
var iter = try self.iterator(Type, values);
|
||||
^
|
||||
./src/main.zig:19:30: note: called from here
|
||||
const rows = try stmt.all(usize, allocator, .{}, .{
|
||||
^
|
||||
./src/main.zig:5:29: note: called from here
|
||||
pub fn main() anyerror!void {
|
||||
```
|
||||
|
||||
## Assign types to bind markers and check them.
|
||||
|
||||
The second (and more interesting) check makes sure you provide appropriately typed values as bind parameters.
|
||||
|
||||
This check is not automatic since with a standard SQL query we have no way to know the types of the bind parameters; to use it you must provide these types in the SQL query with a custom syntax.
|
||||
|
||||
For example, take the same code as above but now we also bind the last parameter:
|
||||
```zig
|
||||
var stmt = try db.prepare("SELECT id FROM user WHERE age > ? AND age < ? AND weight > ?");
|
||||
defer stmt.deinit();
|
||||
|
||||
const rows = try stmt.all(usize, .{ .allocator = allocator }, .{
|
||||
.age_1 = 10,
|
||||
.age_2 = 20,
|
||||
.weight = false,
|
||||
});
|
||||
_ = rows;
|
||||
```
|
||||
|
||||
This compiles correctly even if the `weight` field in our `user` table is of the type `INTEGER`.
|
||||
|
||||
We can make sure the bind parameters have the right type if we rewrite the query like this:
|
||||
```zig
|
||||
var stmt = try db.prepare("SELECT id FROM user WHERE age > ? AND age < ? AND weight > ?{usize}");
|
||||
defer stmt.deinit();
|
||||
|
||||
const rows = try stmt.all(usize, .{ .allocator = allocator }, .{
|
||||
.age_1 = 10,
|
||||
.age_2 = 20,
|
||||
.weight = false,
|
||||
});
|
||||
_ = rows;
|
||||
```
|
||||
Now this fails to compile:
|
||||
```
|
||||
/home/vincent/dev/perso/libs/zig-sqlite/sqlite.zig:745:25: error: value type bool is not the bind marker type usize
|
||||
@compileError("value type " ++ @typeName(struct_field.field_type) ++ " is not the bind marker type " ++ @typeName(typ));
|
||||
^
|
||||
/home/vincent/dev/perso/libs/zig-sqlite/sqlite.zig:817:22: note: called from here
|
||||
self.bind(values);
|
||||
^
|
||||
/home/vincent/dev/perso/libs/zig-sqlite/sqlite.zig:905:41: note: called from here
|
||||
var iter = try self.iterator(Type, values);
|
||||
^
|
||||
./src/main.zig:19:30: note: called from here
|
||||
const rows = try stmt.all(usize, allocator, .{}, .{
|
||||
^
|
||||
./src/main.zig:5:29: note: called from here
|
||||
pub fn main() anyerror!void {
|
||||
```
|
||||
The syntax is straightforward: a bind marker `?` followed by `{`, a Zig type name and finally `}`.
|
||||
|
||||
There are a limited number of types allowed currently:
|
||||
* all [integer](https://ziglang.org/documentation/master/#Primitive-Types) types.
|
||||
* all [arbitrary bit-width integer](https://ziglang.org/documentation/master/#Primitive-Types) types.
|
||||
* all [float](https://ziglang.org/documentation/master/#Primitive-Types) types.
|
||||
* bool.
|
||||
* strings with `[]const u8` or `[]u8`.
|
||||
* strings with `sqlite.Text`.
|
||||
* blobs with `sqlite.Blob`.
|
||||
|
||||
It's probably possible to support arbitrary types if they can be marshaled to a sqlite type. This is something to investigate.
|
||||
|
||||
**NOTE**: this is done at compile time and is quite CPU intensive, therefore it's possible you'll have to play with [@setEvalBranchQuota](https://ziglang.org/documentation/master/#setEvalBranchQuota) to make it compile.
|
||||
|
||||
To finish our example, passing the proper type allows it compile:
|
||||
```zig
|
||||
var stmt = try db.prepare("SELECT id FROM user WHERE age > ? AND age < ? AND weight > ?{usize}");
|
||||
defer stmt.deinit();
|
||||
|
||||
const rows = try stmt.all(usize, .{}, .{
|
||||
.age_1 = 10,
|
||||
.age_2 = 20,
|
||||
.weight = @as(usize, 200),
|
||||
});
|
||||
_ = rows;
|
||||
```
|
||||
|
||||
# User defined SQL functions
|
||||
|
||||
sqlite supports [user-defined SQL functions](https://www.sqlite.org/c3ref/create_function.html) which come in two types:
|
||||
* scalar functions
|
||||
* aggregate functions
|
||||
|
||||
In both cases the arguments are [sqlite3\_values](https://www.sqlite.org/c3ref/value_blob.html) and are converted to Zig values using the following rules:
|
||||
* `TEXT` values can be either `sqlite.Text` or `[]const u8`
|
||||
* `BLOB` values can be either `sqlite.Blob` or `[]const u8`
|
||||
* `INTEGER` values can be any Zig integer
|
||||
* `REAL` values can be any Zig float
|
||||
|
||||
## Scalar functions
|
||||
|
||||
You can define a scalar function using `db.createScalarFunction`:
|
||||
```zig
|
||||
try db.createScalarFunction(
|
||||
"blake3",
|
||||
struct {
|
||||
fn run(input: []const u8) [std.crypto.hash.Blake3.digest_length]u8 {
|
||||
var hash: [std.crypto.hash.Blake3.digest_length]u8 = undefined;
|
||||
std.crypto.hash.Blake3.hash(input, &hash, .{});
|
||||
return hash;
|
||||
}
|
||||
}.run,
|
||||
.{},
|
||||
);
|
||||
|
||||
const hash = try db.one([std.crypto.hash.Blake3.digest_length]u8, "SELECT blake3('hello')", .{}, .{});
|
||||
```
|
||||
|
||||
Each input argument in the function call in the statement is passed on to the registered `run` function.
|
||||
|
||||
## Aggregate functions
|
||||
|
||||
You can define an aggregate function using `db.createAggregateFunction`:
|
||||
```zig
|
||||
const MyContext = struct {
|
||||
sum: u32,
|
||||
};
|
||||
var my_ctx = MyContext{ .sum = 0 };
|
||||
|
||||
try db.createAggregateFunction(
|
||||
"mySum",
|
||||
&my_ctx,
|
||||
struct {
|
||||
fn step(ctx: *MyContext, input: u32) void {
|
||||
ctx.sum += input;
|
||||
}
|
||||
}.step,
|
||||
struct {
|
||||
fn finalize(ctx: *MyContext) u32 {
|
||||
return ctx.sum;
|
||||
}
|
||||
}.finalize,
|
||||
.{},
|
||||
);
|
||||
|
||||
const result = try db.one(usize, "SELECT mySum(nb) FROM foobar", .{}, .{});
|
||||
```
|
||||
|
||||
Each input argument in the function call in the statement is passed on to the registered `step` function.
|
||||
The `finalize` function is called once at the end.
|
||||
|
||||
The context (2nd argument of `createAggregateFunction`) can be whatever you want; both the `step` and `finalize` functions must
have their first argument of the same type as the context.
|
374
src/.deps/zig-sqlite/build.zig
Normal file
374
src/.deps/zig-sqlite/build.zig
Normal file
|
@ -0,0 +1,374 @@
|
|||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
|
||||
// The bundled sqlite3 static library for the current build iteration,
// or null when the system libsqlite3 should be linked instead.
var sqlite3: ?*std.Build.Step.Compile = null;

/// Links `step` against sqlite: the bundled static library when one was
/// built (see `sqlite3` above), otherwise the system library via libc.
fn linkSqlite(step: *std.Build.Step.Compile) void {
    if (sqlite3) |lib| {
        step.linkLibrary(lib);
        return;
    }
    step.linkLibC();
    step.linkSystemLibrary("sqlite3");
}
|
||||
|
||||
/// Returns the target to actually build for.
///
/// When building the bundled sqlite3 against gnu libc, a minimum glibc
/// version of 2.28 is enforced: an explicitly requested older version is a
/// hard error, and an unspecified version is pinned to 2.28.0.
/// Non-bundled builds pass the target through unchanged.
fn getTarget(original_target: std.zig.CrossTarget, bundled: bool) std.zig.CrossTarget {
    if (!bundled) return original_target;

    var result = original_target;

    if (result.isGnuLibC()) {
        const min_glibc_version = std.builtin.Version{
            .major = 2,
            .minor = 28,
            .patch = 0,
        };
        if (result.glibc_version) |ver| {
            if (ver.order(min_glibc_version) == .lt) {
                std.debug.panic("sqlite requires glibc version >= 2.28", .{});
            }
        } else {
            result.setGnuLibCVersion(2, 28, 0);
        }
    }

    return result;
}
|
||||
|
||||
// One entry in the matrix of targets the test suite is compiled for.
const TestTarget = struct {
    // Cross-compilation target; the default (empty) means native.
    target: std.zig.CrossTarget = .{},
    // Compile the tests in single-threaded mode.
    single_threaded: bool = false,
    // Whether to build the bundled sqlite3 source instead of linking
    // the system library; cross targets require the bundled source.
    bundled: bool,
};
|
||||
|
||||
// The full matrix of test targets, selected at comptime from the host
// cpu/os. Hosts without cross-compilation entries fall back to a single
// native, system-linked target.
const all_test_targets = switch (builtin.target.cpu.arch) {
    .x86_64 => switch (builtin.target.os.tag) {
        .linux => [_]TestTarget{
            // Native, linking the system sqlite3.
            .{ .target = .{}, .bundled = false },
            // Cross targets for linux using the bundled source.
            .{ .target = .{ .cpu_arch = .x86_64, .abi = .musl }, .bundled = true },
            .{ .target = .{ .cpu_arch = .x86, .abi = .musl }, .bundled = true },
            .{ .target = .{ .cpu_arch = .aarch64, .abi = .musl }, .bundled = true },
            .{ .target = .{ .cpu_arch = .riscv64, .abi = .musl }, .bundled = true },
            .{ .target = .{ .cpu_arch = .mips, .abi = .musl }, .bundled = true },
            // TODO(vincent): failing for some time for unknown reasons
            // .{ .target = .{ .cpu_arch = .arm, .abi = .musleabihf }, .bundled = true },
            // Targets windows.
            .{ .target = .{ .cpu_arch = .x86_64, .os_tag = .windows }, .bundled = true },
            .{ .target = .{ .cpu_arch = .x86, .os_tag = .windows }, .bundled = true },
            // Targets macOS.
            .{ .target = .{ .cpu_arch = .x86_64, .os_tag = .macos }, .bundled = true },
            .{ .target = .{ .cpu_arch = .aarch64, .os_tag = .macos }, .bundled = true },
        },
        .windows => [_]TestTarget{
            .{ .target = .{ .cpu_arch = .x86_64, .abi = .gnu }, .bundled = true },
            .{ .target = .{ .cpu_arch = .x86, .abi = .gnu }, .bundled = true },
        },
        .freebsd => [_]TestTarget{
            .{ .target = .{}, .bundled = false },
            .{ .target = .{ .cpu_arch = .x86_64 }, .bundled = true },
        },
        .macos => [_]TestTarget{
            .{ .target = .{ .cpu_arch = .x86_64 }, .bundled = true },
        },
        else => [_]TestTarget{
            .{ .target = .{}, .bundled = false },
        },
    },
    else => [_]TestTarget{
        .{ .target = .{}, .bundled = false },
    },
};
|
||||
|
||||
pub fn build(b: *std.Build) !void {
    // Options forwarded to the test binaries via build_options.
    const in_memory = b.option(bool, "in_memory", "Should the tests run with sqlite in memory (default true)") orelse true;
    const dbfile = b.option([]const u8, "dbfile", "Always use this database file instead of a temporary one");
    const use_bundled = b.option(bool, "use_bundled", "Use the bundled sqlite3 source instead of linking the system library (default false)");

    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Tool to preprocess the sqlite header files.
    //
    // Due to limitations of translate-c the standard header files can't be
    // used for building loadable extensions, so this tool creates usable ones.
    const preprocess_files_tool = b.addExecutable(.{
        .name = "preprocess-files",
        .root_source_file = .{ .path = "tools/preprocess_files.zig" },
        .target = getTarget(target, true),
        .optimize = optimize,
    });

    // Top-level step to run the preprocess-files tool.
    const preprocess_files_run = b.step("preprocess-files", "Run the preprocess-files tool");
    const preprocess_files_tool_run = b.addRunArtifact(preprocess_files_tool);
    preprocess_files_run.dependOn(&preprocess_files_tool_run.step);

    // If the target is native we assume the user didn't change it with
    // -Dtarget and compile tests for every entry of the matrix; otherwise
    // only the requested target is used.
    const test_targets = if (target.isNative())
        &all_test_targets
    else
        &[_]TestTarget{.{
            .target = target,
            .bundled = use_bundled orelse false,
        }};

    const test_step = b.step("test", "Run library tests");

    // Tests are only executed for native targets but are compiled for all of
    // `test_targets`. Pass -fqemu, -fdarling, -fwine or -frosetta to also
    // execute tests for foreign targets.
    for (test_targets) |test_target| {
        const bundled = use_bundled orelse test_target.bundled;
        const cross_target = getTarget(test_target.target, bundled);

        const threading_suffix = if (test_target.single_threaded) "single" else "multi";
        const test_name = b.fmt("{s}-{s}-{s}", .{
            try cross_target.zigTriple(b.allocator),
            @tagName(optimize),
            threading_suffix,
        });

        const tests = b.addTest(.{
            .name = test_name,
            .target = cross_target,
            .optimize = optimize,
            .root_source_file = .{ .path = "sqlite.zig" },
            .single_threaded = test_target.single_threaded,
        });

        // Build the bundled sqlite3 and publish it so linkSqlite uses it.
        if (bundled) {
            const bundled_lib = b.addStaticLibrary(.{
                .name = "sqlite",
                .target = cross_target,
                .optimize = optimize,
            });
            bundled_lib.addCSourceFile("c/sqlite3.c", &[_][]const u8{"-std=c99"});
            bundled_lib.linkLibC();
            sqlite3 = bundled_lib;
        }

        if (bundled) tests.addIncludePath("c");
        linkSqlite(tests);

        // NOTE(review): this library is built per-iteration but attached to
        // no step and never installed — looks vestigial; confirm upstream
        // intent before removing.
        const static_lib = b.addStaticLibrary(.{
            .name = "zig-sqlite",
            .root_source_file = .{ .path = "sqlite.zig" },
            .target = cross_target,
            .optimize = optimize,
        });
        if (bundled) static_lib.addIncludePath("c");
        linkSqlite(static_lib);

        const tests_options = b.addOptions();
        tests.addOptions("build_options", tests_options);

        tests_options.addOption(bool, "in_memory", in_memory);
        tests_options.addOption(?[]const u8, "dbfile", dbfile);

        test_step.dependOn(&tests.step);
    }

    // Fuzzing
    //
    // The bundled sqlite3 used by everything fuzzing-related below.
    const lib = b.addStaticLibrary(.{
        .name = "sqlite",
        .target = getTarget(target, true),
        .optimize = optimize,
    });
    lib.addCSourceFile("c/sqlite3.c", &[_][]const u8{"-std=c99"});
    lib.addIncludePath("c");
    lib.linkLibC();

    // The library containing the fuzz entry point.
    const fuzz_lib = b.addStaticLibrary(.{
        .name = "fuzz-lib",
        .root_source_file = .{ .path = "fuzz/main.zig" },
        .target = getTarget(target, true),
        .optimize = optimize,
    });
    fuzz_lib.addIncludePath("c");
    fuzz_lib.linkLibrary(lib);
    fuzz_lib.want_lto = true;
    fuzz_lib.bundle_compiler_rt = true;
    fuzz_lib.addAnonymousModule("sqlite", .{
        .source_file = .{ .path = "sqlite.zig" },
    });

    // Setup the output name.
    const fuzz_executable_name = "fuzz";
    const fuzz_exe_path = try b.cache_root.join(b.allocator, &.{fuzz_executable_name});

    // We want `afl-clang-lto -o path/to/output path/to/library`.
    const fuzz_compile = b.addSystemCommand(&.{ "afl-clang-lto", "-o", fuzz_exe_path });
    fuzz_compile.addArtifactArg(lib);
    fuzz_compile.addArtifactArg(fuzz_lib);

    // Install the cached output to the install 'bin' path.
    const fuzz_install = b.addInstallBinFile(.{ .path = fuzz_exe_path }, fuzz_executable_name);

    // Top-level step that compiles and installs the fuzz executable.
    const fuzz_compile_run = b.step("fuzz", "Build executable for fuzz testing using afl-clang-lto");
    fuzz_compile_run.dependOn(&fuzz_compile.step);
    fuzz_compile_run.dependOn(&fuzz_install.step);

    // Companion executable for debugging crashes found while fuzzing.
    const fuzz_debug_exe = b.addExecutable(.{
        .name = "fuzz-debug",
        .root_source_file = .{ .path = "fuzz/main.zig" },
        .target = getTarget(target, true),
        .optimize = optimize,
    });
    fuzz_debug_exe.addIncludePath("c");
    fuzz_debug_exe.linkLibrary(lib);
    fuzz_debug_exe.addAnonymousModule("sqlite", .{
        .source_file = .{ .path = "sqlite.zig" },
    });

    // Only install fuzz-debug when the fuzz step is run.
    const install_fuzz_debug_exe = b.addInstallArtifact(fuzz_debug_exe);
    fuzz_compile_run.dependOn(&install_fuzz_debug_exe.step);

    //
    // Examples
    //

    // Loadable extension: an example shared library plus a binary that tests it.
    const zigcrypto_loadable_ext = b.addSharedLibrary(.{
        .name = "zigcrypto",
        .root_source_file = .{ .path = "examples/zigcrypto.zig" },
        .version = null,
        .target = getTarget(target, true),
        .optimize = optimize,
    });
    zigcrypto_loadable_ext.force_pic = true;
    zigcrypto_loadable_ext.addIncludePath("c");
    zigcrypto_loadable_ext.addAnonymousModule("sqlite", .{
        .source_file = .{ .path = "sqlite.zig" },
    });
    zigcrypto_loadable_ext.linkLibrary(lib);

    const install_zigcrypto_loadable_ext = b.addInstallArtifact(zigcrypto_loadable_ext);

    const zigcrypto_test = b.addExecutable(.{
        .name = "zigcrypto-test",
        .root_source_file = .{ .path = "examples/zigcrypto_test.zig" },
        .target = getTarget(target, true),
        .optimize = optimize,
    });
    zigcrypto_test.addIncludePath("c");
    zigcrypto_test.addAnonymousModule("sqlite", .{
        .source_file = .{ .path = "sqlite.zig" },
    });
    zigcrypto_test.linkLibrary(lib);

    const install_zigcrypto_test = b.addInstallArtifact(zigcrypto_test);

    const zigcrypto_compile_run = b.step("zigcrypto", "Build the 'zigcrypto' SQLite loadable extension");
    zigcrypto_compile_run.dependOn(&install_zigcrypto_loadable_ext.step);
    zigcrypto_compile_run.dependOn(&install_zigcrypto_test.step);
}
|
19
src/.deps/zig-sqlite/c.zig
Normal file
19
src/.deps/zig-sqlite/c.zig
Normal file
|
@ -0,0 +1,19 @@
|
|||
const root = @import("root");

// Loadable extensions cannot include the regular sqlite3.h, so when the root
// source file declares `loadable_extension` a preprocessed header wrapper is
// imported instead of going through @cImport.
pub const c = if (@hasDecl(root, "loadable_extension"))
    @import("c/loadable_extension.zig")
else
    @cImport({
        @cInclude("sqlite3.h");
    });

/// versionGreaterThanOrEqualTo returns true if the SQLite version is >= to the major.minor.patch provided.
pub fn versionGreaterThanOrEqualTo(major: u8, minor: u8, patch: u8) bool {
    // SQLITE_VERSION_NUMBER encodes X.Y.Z as X*1000000 + Y*1000 + Z.
    const wanted = @as(u32, major) * 1000000 + @as(u32, minor) * 1000 + @as(u32, patch);
    return c.SQLITE_VERSION_NUMBER >= wanted;
}

comptime {
    // Fail the build early when compiled against a too-old SQLite.
    if (!versionGreaterThanOrEqualTo(3, 21, 0)) {
        @compileError("must use SQLite >= 3.21.0");
    }
}
|
12037
src/.deps/zig-sqlite/c/loadable-ext-sqlite3.h
Normal file
12037
src/.deps/zig-sqlite/c/loadable-ext-sqlite3.h
Normal file
File diff suppressed because it is too large
Load Diff
433
src/.deps/zig-sqlite/c/loadable-ext-sqlite3ext.h
Normal file
433
src/.deps/zig-sqlite/c/loadable-ext-sqlite3ext.h
Normal file
|
@ -0,0 +1,433 @@
|
|||
/*
|
||||
** 2006 June 7
|
||||
**
|
||||
** The author disclaims copyright to this source code. In place of
|
||||
** a legal notice, here is a blessing:
|
||||
**
|
||||
** May you do good and not evil.
|
||||
** May you find forgiveness for yourself and forgive others.
|
||||
** May you share freely, never taking more than you give.
|
||||
**
|
||||
*************************************************************************
|
||||
** This header file defines the SQLite interface for use by
|
||||
** shared libraries that want to be imported as extensions into
|
||||
** an SQLite instance. Shared libraries that intend to be loaded
|
||||
** as extensions by SQLite should #include this file instead of
|
||||
** sqlite3.h.
|
||||
*/
|
||||
#ifndef SQLITE3EXT_H
|
||||
#define SQLITE3EXT_H
|
||||
#include "loadable-ext-sqlite3.h"
|
||||
|
||||
/*
|
||||
** The following structure holds pointers to all of the SQLite API
|
||||
** routines.
|
||||
**
|
||||
** WARNING: In order to maintain backwards compatibility, add new
|
||||
** interfaces to the end of this structure only. If you insert new
|
||||
** interfaces in the middle of this structure, then older different
|
||||
** versions of SQLite will not be able to load each other's shared
|
||||
** libraries!
|
||||
*/
|
||||
struct sqlite3_api_routines {
|
||||
void * (*aggregate_context)(sqlite3_context*,int nBytes);
|
||||
int (*aggregate_count)(sqlite3_context*);
|
||||
int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*));
|
||||
int (*bind_double)(sqlite3_stmt*,int,double);
|
||||
int (*bind_int)(sqlite3_stmt*,int,int);
|
||||
int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64);
|
||||
int (*bind_null)(sqlite3_stmt*,int);
|
||||
int (*bind_parameter_count)(sqlite3_stmt*);
|
||||
int (*bind_parameter_index)(sqlite3_stmt*,const char*zName);
|
||||
const char * (*bind_parameter_name)(sqlite3_stmt*,int);
|
||||
int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*));
|
||||
int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*));
|
||||
int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*);
|
||||
int (*busy_handler)(sqlite3*,int(*)(void*,int),void*);
|
||||
int (*busy_timeout)(sqlite3*,int ms);
|
||||
int (*changes)(sqlite3*);
|
||||
int (*close)(sqlite3*);
|
||||
int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*,
|
||||
int eTextRep,const char*));
|
||||
int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*,
|
||||
int eTextRep,const void*));
|
||||
const void * (*column_blob)(sqlite3_stmt*,int iCol);
|
||||
int (*column_bytes)(sqlite3_stmt*,int iCol);
|
||||
int (*column_bytes16)(sqlite3_stmt*,int iCol);
|
||||
int (*column_count)(sqlite3_stmt*pStmt);
|
||||
const char * (*column_database_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_database_name16)(sqlite3_stmt*,int);
|
||||
const char * (*column_decltype)(sqlite3_stmt*,int i);
|
||||
const void * (*column_decltype16)(sqlite3_stmt*,int);
|
||||
double (*column_double)(sqlite3_stmt*,int iCol);
|
||||
int (*column_int)(sqlite3_stmt*,int iCol);
|
||||
sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol);
|
||||
const char * (*column_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_name16)(sqlite3_stmt*,int);
|
||||
const char * (*column_origin_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_origin_name16)(sqlite3_stmt*,int);
|
||||
const char * (*column_table_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_table_name16)(sqlite3_stmt*,int);
|
||||
const unsigned char * (*column_text)(sqlite3_stmt*,int iCol);
|
||||
const void * (*column_text16)(sqlite3_stmt*,int iCol);
|
||||
int (*column_type)(sqlite3_stmt*,int iCol);
|
||||
sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol);
|
||||
void * (*commit_hook)(sqlite3*,int(*)(void*),void*);
|
||||
int (*complete)(const char*sql);
|
||||
int (*complete16)(const void*sql);
|
||||
int (*create_collation)(sqlite3*,const char*,int,void*,
|
||||
int(*)(void*,int,const void*,int,const void*));
|
||||
int (*create_collation16)(sqlite3*,const void*,int,void*,
|
||||
int(*)(void*,int,const void*,int,const void*));
|
||||
int (*create_function)(sqlite3*,const char*,int,int,void*,
|
||||
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*));
|
||||
int (*create_function16)(sqlite3*,const void*,int,int,void*,
|
||||
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*));
|
||||
int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*);
|
||||
int (*data_count)(sqlite3_stmt*pStmt);
|
||||
sqlite3 * (*db_handle)(sqlite3_stmt*);
|
||||
int (*declare_vtab)(sqlite3*,const char*);
|
||||
int (*enable_shared_cache)(int);
|
||||
int (*errcode)(sqlite3*db);
|
||||
const char * (*errmsg)(sqlite3*);
|
||||
const void * (*errmsg16)(sqlite3*);
|
||||
int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**);
|
||||
int (*expired)(sqlite3_stmt*);
|
||||
int (*finalize)(sqlite3_stmt*pStmt);
|
||||
void (*free)(void*);
|
||||
void (*free_table)(char**result);
|
||||
int (*get_autocommit)(sqlite3*);
|
||||
void * (*get_auxdata)(sqlite3_context*,int);
|
||||
int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**);
|
||||
int (*global_recover)(void);
|
||||
void (*interruptx)(sqlite3*);
|
||||
sqlite_int64 (*last_insert_rowid)(sqlite3*);
|
||||
const char * (*libversion)(void);
|
||||
int (*libversion_number)(void);
|
||||
void *(*malloc)(int);
|
||||
char * (*mprintf)(const char*,...);
|
||||
int (*open)(const char*,sqlite3**);
|
||||
int (*open16)(const void*,sqlite3**);
|
||||
int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
|
||||
int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
|
||||
void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*);
|
||||
void (*progress_handler)(sqlite3*,int,int(*)(void*),void*);
|
||||
void *(*realloc)(void*,int);
|
||||
int (*reset)(sqlite3_stmt*pStmt);
|
||||
void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_double)(sqlite3_context*,double);
|
||||
void (*result_error)(sqlite3_context*,const char*,int);
|
||||
void (*result_error16)(sqlite3_context*,const void*,int);
|
||||
void (*result_int)(sqlite3_context*,int);
|
||||
void (*result_int64)(sqlite3_context*,sqlite_int64);
|
||||
void (*result_null)(sqlite3_context*);
|
||||
void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*));
|
||||
void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_value)(sqlite3_context*,sqlite3_value*);
|
||||
void * (*rollback_hook)(sqlite3*,void(*)(void*),void*);
|
||||
int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*,
|
||||
const char*,const char*),void*);
|
||||
void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*));
|
||||
char * (*xsnprintf)(int,char*,const char*,...);
|
||||
int (*step)(sqlite3_stmt*);
|
||||
int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*,
|
||||
char const**,char const**,int*,int*,int*);
|
||||
void (*thread_cleanup)(void);
|
||||
int (*total_changes)(sqlite3*);
|
||||
void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*);
|
||||
int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*);
|
||||
void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*,
|
||||
sqlite_int64),void*);
|
||||
void * (*user_data)(sqlite3_context*);
|
||||
const void * (*value_blob)(sqlite3_value*);
|
||||
int (*value_bytes)(sqlite3_value*);
|
||||
int (*value_bytes16)(sqlite3_value*);
|
||||
double (*value_double)(sqlite3_value*);
|
||||
int (*value_int)(sqlite3_value*);
|
||||
sqlite_int64 (*value_int64)(sqlite3_value*);
|
||||
int (*value_numeric_type)(sqlite3_value*);
|
||||
const unsigned char * (*value_text)(sqlite3_value*);
|
||||
const void * (*value_text16)(sqlite3_value*);
|
||||
const void * (*value_text16be)(sqlite3_value*);
|
||||
const void * (*value_text16le)(sqlite3_value*);
|
||||
int (*value_type)(sqlite3_value*);
|
||||
char *(*vmprintf)(const char*,va_list);
|
||||
/* Added ??? */
|
||||
int (*overload_function)(sqlite3*, const char *zFuncName, int nArg);
|
||||
/* Added by 3.3.13 */
|
||||
int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
|
||||
int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
|
||||
int (*clear_bindings)(sqlite3_stmt*);
|
||||
/* Added by 3.4.1 */
|
||||
int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*,
|
||||
void (*xDestroy)(void *));
|
||||
/* Added by 3.5.0 */
|
||||
int (*bind_zeroblob)(sqlite3_stmt*,int,int);
|
||||
int (*blob_bytes)(sqlite3_blob*);
|
||||
int (*blob_close)(sqlite3_blob*);
|
||||
int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64,
|
||||
int,sqlite3_blob**);
|
||||
int (*blob_read)(sqlite3_blob*,void*,int,int);
|
||||
int (*blob_write)(sqlite3_blob*,const void*,int,int);
|
||||
int (*create_collation_v2)(sqlite3*,const char*,int,void*,
|
||||
int(*)(void*,int,const void*,int,const void*),
|
||||
void(*)(void*));
|
||||
int (*file_control)(sqlite3*,const char*,int,void*);
|
||||
sqlite3_int64 (*memory_highwater)(int);
|
||||
sqlite3_int64 (*memory_used)(void);
|
||||
sqlite3_mutex *(*mutex_alloc)(int);
|
||||
void (*mutex_enter)(sqlite3_mutex*);
|
||||
void (*mutex_free)(sqlite3_mutex*);
|
||||
void (*mutex_leave)(sqlite3_mutex*);
|
||||
int (*mutex_try)(sqlite3_mutex*);
|
||||
int (*open_v2)(const char*,sqlite3**,int,const char*);
|
||||
int (*release_memory)(int);
|
||||
void (*result_error_nomem)(sqlite3_context*);
|
||||
void (*result_error_toobig)(sqlite3_context*);
|
||||
int (*sleep)(int);
|
||||
void (*soft_heap_limit)(int);
|
||||
sqlite3_vfs *(*vfs_find)(const char*);
|
||||
int (*vfs_register)(sqlite3_vfs*,int);
|
||||
int (*vfs_unregister)(sqlite3_vfs*);
|
||||
int (*xthreadsafe)(void);
|
||||
void (*result_zeroblob)(sqlite3_context*,int);
|
||||
void (*result_error_code)(sqlite3_context*,int);
|
||||
int (*test_control)(int, ...);
|
||||
void (*randomness)(int,void*);
|
||||
sqlite3 *(*context_db_handle)(sqlite3_context*);
|
||||
int (*extended_result_codes)(sqlite3*,int);
|
||||
int (*limit)(sqlite3*,int,int);
|
||||
sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*);
|
||||
const char *(*sql)(sqlite3_stmt*);
|
||||
int (*status)(int,int*,int*,int);
|
||||
int (*backup_finish)(sqlite3_backup*);
|
||||
sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*);
|
||||
int (*backup_pagecount)(sqlite3_backup*);
|
||||
int (*backup_remaining)(sqlite3_backup*);
|
||||
int (*backup_step)(sqlite3_backup*,int);
|
||||
const char *(*compileoption_get)(int);
|
||||
int (*compileoption_used)(const char*);
|
||||
int (*create_function_v2)(sqlite3*,const char*,int,int,void*,
|
||||
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*),
|
||||
void(*xDestroy)(void*));
|
||||
int (*db_config)(sqlite3*,int,...);
|
||||
sqlite3_mutex *(*db_mutex)(sqlite3*);
|
||||
int (*db_status)(sqlite3*,int,int*,int*,int);
|
||||
int (*extended_errcode)(sqlite3*);
|
||||
void (*log)(int,const char*,...);
|
||||
sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64);
|
||||
const char *(*sourceid)(void);
|
||||
int (*stmt_status)(sqlite3_stmt*,int,int);
|
||||
int (*strnicmp)(const char*,const char*,int);
|
||||
int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*);
|
||||
int (*wal_autocheckpoint)(sqlite3*,int);
|
||||
int (*wal_checkpoint)(sqlite3*,const char*);
|
||||
void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*);
|
||||
int (*blob_reopen)(sqlite3_blob*,sqlite3_int64);
|
||||
int (*vtab_config)(sqlite3*,int op,...);
|
||||
int (*vtab_on_conflict)(sqlite3*);
|
||||
/* Version 3.7.16 and later */
|
||||
int (*close_v2)(sqlite3*);
|
||||
const char *(*db_filename)(sqlite3*,const char*);
|
||||
int (*db_readonly)(sqlite3*,const char*);
|
||||
int (*db_release_memory)(sqlite3*);
|
||||
const char *(*errstr)(int);
|
||||
int (*stmt_busy)(sqlite3_stmt*);
|
||||
int (*stmt_readonly)(sqlite3_stmt*);
|
||||
int (*stricmp)(const char*,const char*);
|
||||
int (*uri_boolean)(const char*,const char*,int);
|
||||
sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64);
|
||||
const char *(*uri_parameter)(const char*,const char*);
|
||||
char *(*xvsnprintf)(int,char*,const char*,va_list);
|
||||
int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*);
|
||||
/* Version 3.8.7 and later */
|
||||
int (*auto_extension)(void(*)(void));
|
||||
int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64,
|
||||
void(*)(void*));
|
||||
int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64,
|
||||
void(*)(void*),unsigned char);
|
||||
int (*cancel_auto_extension)(void(*)(void));
|
||||
int (*load_extension)(sqlite3*,const char*,const char*,char**);
|
||||
void *(*malloc64)(sqlite3_uint64);
|
||||
sqlite3_uint64 (*msize)(void*);
|
||||
void *(*realloc64)(void*,sqlite3_uint64);
|
||||
void (*reset_auto_extension)(void);
|
||||
void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64,
|
||||
void(*)(void*));
|
||||
void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64,
|
||||
void(*)(void*), unsigned char);
|
||||
int (*strglob)(const char*,const char*);
|
||||
/* Version 3.8.11 and later */
|
||||
sqlite3_value *(*value_dup)(const sqlite3_value*);
|
||||
void (*value_free)(sqlite3_value*);
|
||||
int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64);
|
||||
int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64);
|
||||
/* Version 3.9.0 and later */
|
||||
unsigned int (*value_subtype)(sqlite3_value*);
|
||||
void (*result_subtype)(sqlite3_context*,unsigned int);
|
||||
/* Version 3.10.0 and later */
|
||||
int (*status64)(int,sqlite3_int64*,sqlite3_int64*,int);
|
||||
int (*strlike)(const char*,const char*,unsigned int);
|
||||
int (*db_cacheflush)(sqlite3*);
|
||||
/* Version 3.12.0 and later */
|
||||
int (*system_errno)(sqlite3*);
|
||||
/* Version 3.14.0 and later */
|
||||
int (*trace_v2)(sqlite3*,unsigned,int(*)(unsigned,void*,void*,void*),void*);
|
||||
char *(*expanded_sql)(sqlite3_stmt*);
|
||||
/* Version 3.18.0 and later */
|
||||
void (*set_last_insert_rowid)(sqlite3*,sqlite3_int64);
|
||||
/* Version 3.20.0 and later */
|
||||
int (*prepare_v3)(sqlite3*,const char*,int,unsigned int,
|
||||
sqlite3_stmt**,const char**);
|
||||
int (*prepare16_v3)(sqlite3*,const void*,int,unsigned int,
|
||||
sqlite3_stmt**,const void**);
|
||||
int (*bind_pointer)(sqlite3_stmt*,int,void*,const char*,void(*)(void*));
|
||||
void (*result_pointer)(sqlite3_context*,void*,const char*,void(*)(void*));
|
||||
void *(*value_pointer)(sqlite3_value*,const char*);
|
||||
int (*vtab_nochange)(sqlite3_context*);
|
||||
int (*value_nochange)(sqlite3_value*);
|
||||
const char *(*vtab_collation)(sqlite3_index_info*,int);
|
||||
/* Version 3.24.0 and later */
|
||||
int (*keyword_count)(void);
|
||||
int (*keyword_name)(int,const char**,int*);
|
||||
int (*keyword_check)(const char*,int);
|
||||
sqlite3_str *(*str_new)(sqlite3*);
|
||||
char *(*str_finish)(sqlite3_str*);
|
||||
void (*str_appendf)(sqlite3_str*, const char *zFormat, ...);
|
||||
void (*str_vappendf)(sqlite3_str*, const char *zFormat, va_list);
|
||||
void (*str_append)(sqlite3_str*, const char *zIn, int N);
|
||||
void (*str_appendall)(sqlite3_str*, const char *zIn);
|
||||
void (*str_appendchar)(sqlite3_str*, int N, char C);
|
||||
void (*str_reset)(sqlite3_str*);
|
||||
int (*str_errcode)(sqlite3_str*);
|
||||
int (*str_length)(sqlite3_str*);
|
||||
char *(*str_value)(sqlite3_str*);
|
||||
/* Version 3.25.0 and later */
|
||||
int (*create_window_function)(sqlite3*,const char*,int,int,void*,
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*),
|
||||
void (*xValue)(sqlite3_context*),
|
||||
void (*xInv)(sqlite3_context*,int,sqlite3_value**),
|
||||
void(*xDestroy)(void*));
|
||||
/* Version 3.26.0 and later */
|
||||
const char *(*normalized_sql)(sqlite3_stmt*);
|
||||
/* Version 3.28.0 and later */
|
||||
int (*stmt_isexplain)(sqlite3_stmt*);
|
||||
int (*value_frombind)(sqlite3_value*);
|
||||
/* Version 3.30.0 and later */
|
||||
int (*drop_modules)(sqlite3*,const char**);
|
||||
/* Version 3.31.0 and later */
|
||||
sqlite3_int64 (*hard_heap_limit64)(sqlite3_int64);
|
||||
const char *(*uri_key)(const char*,int);
|
||||
const char *(*filename_database)(const char*);
|
||||
const char *(*filename_journal)(const char*);
|
||||
const char *(*filename_wal)(const char*);
|
||||
/* Version 3.32.0 and later */
|
||||
char *(*create_filename)(const char*,const char*,const char*,
|
||||
int,const char**);
|
||||
void (*free_filename)(char*);
|
||||
sqlite3_file *(*database_file_object)(const char*);
|
||||
/* Version 3.34.0 and later */
|
||||
int (*txn_state)(sqlite3*,const char*);
|
||||
/* Version 3.36.1 and later */
|
||||
sqlite3_int64 (*changes64)(sqlite3*);
|
||||
sqlite3_int64 (*total_changes64)(sqlite3*);
|
||||
/* Version 3.37.0 and later */
|
||||
int (*autovacuum_pages)(sqlite3*,
|
||||
unsigned int(*)(void*,const char*,unsigned int,unsigned int,unsigned int),
|
||||
void*, void(*)(void*));
|
||||
/* Version 3.38.0 and later */
|
||||
int (*error_offset)(sqlite3*);
|
||||
int (*vtab_rhs_value)(sqlite3_index_info*,int,sqlite3_value**);
|
||||
int (*vtab_distinct)(sqlite3_index_info*);
|
||||
int (*vtab_in)(sqlite3_index_info*,int,int);
|
||||
int (*vtab_in_first)(sqlite3_value*,sqlite3_value**);
|
||||
int (*vtab_in_next)(sqlite3_value*,sqlite3_value**);
|
||||
/* Version 3.39.0 and later */
|
||||
int (*deserialize)(sqlite3*,const char*,unsigned char*,
|
||||
sqlite3_int64,sqlite3_int64,unsigned);
|
||||
unsigned char *(*serialize)(sqlite3*,const char *,sqlite3_int64*,
|
||||
unsigned int);
|
||||
const char *(*db_name)(sqlite3*,int);
|
||||
};
|
||||
|
||||
/*
|
||||
** This is the function signature used for all extension entry points. It
|
||||
** is also defined in the file "loadext.c".
|
||||
*/
|
||||
typedef int (*sqlite3_loadext_entry)(
|
||||
sqlite3 *db, /* Handle to the database. */
|
||||
char **pzErrMsg, /* Used to set error string on failure. */
|
||||
const sqlite3_api_routines *pThunk /* Extension API function pointers. */
|
||||
);
|
||||
|
||||
/*
|
||||
** The following macros redefine the API routines so that they are
|
||||
** redirected through the global sqlite3_api structure.
|
||||
**
|
||||
** This header file is also used by the loadext.c source file
|
||||
** (part of the main SQLite library - not an extension) so that
|
||||
** it can get access to the sqlite3_api_routines structure
|
||||
** definition. But the main library does not want to redefine
|
||||
** the API. So the redefinition macros are only valid if the
|
||||
** SQLITE_CORE macros is undefined.
|
||||
*/
|
||||
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#endif
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#endif
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#endif
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#endif
|
||||
/* Version 3.7.16 and later */
|
||||
/* Version 3.8.7 and later */
|
||||
/* Version 3.8.11 and later */
|
||||
/* Version 3.9.0 and later */
|
||||
/* Version 3.10.0 and later */
|
||||
/* Version 3.12.0 and later */
|
||||
/* Version 3.14.0 and later */
|
||||
/* Version 3.18.0 and later */
|
||||
/* Version 3.20.0 and later */
|
||||
/* Version 3.22.0 and later */
|
||||
/* Version 3.24.0 and later */
|
||||
/* Version 3.25.0 and later */
|
||||
/* Version 3.26.0 and later */
|
||||
/* Version 3.28.0 and later */
|
||||
/* Version 3.30.0 and later */
|
||||
/* Version 3.31.0 and later */
|
||||
/* Version 3.32.0 and later */
|
||||
/* Version 3.34.0 and later */
|
||||
/* Version 3.36.1 and later */
|
||||
/* Version 3.37.0 and later */
|
||||
/* Version 3.38.0 and later */
|
||||
/* Version 3.39.0 and later */
|
||||
#ifndef SQLITE_OMIT_DESERIALIZE
|
||||
#endif
|
||||
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
|
||||
|
||||
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
|
||||
/* This case when the file really is being compiled as a loadable
|
||||
** extension */
|
||||
# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0;
|
||||
# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v;
|
||||
# define SQLITE_EXTENSION_INIT3 \
|
||||
extern const sqlite3_api_routines *sqlite3_api;
|
||||
#else
|
||||
/* This case when the file is being statically linked into the
|
||||
** application */
|
||||
# define SQLITE_EXTENSION_INIT1 /*no-op*/
|
||||
# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */
|
||||
# define SQLITE_EXTENSION_INIT3 /*no-op*/
|
||||
#endif
|
||||
|
||||
#endif /* SQLITE3EXT_H */
|
809
src/.deps/zig-sqlite/c/loadable_extension.zig
Normal file
809
src/.deps/zig-sqlite/c/loadable_extension.zig
Normal file
|
@ -0,0 +1,809 @@
|
|||
const c = @cImport({
|
||||
@cInclude("loadable-ext-sqlite3ext.h");
|
||||
});
|
||||
|
||||
pub usingnamespace c;
|
||||
|
||||
pub var sqlite3_api: [*c]c.sqlite3_api_routines = null;
|
||||
|
||||
pub const sqlite3_transfer_bindings = @compileError("sqlite3_transfer_bindings is deprecated");
|
||||
pub const sqlite3_global_recover = @compileError("sqlite3_global_recover is deprecated");
|
||||
pub const sqlite3_expired = @compileError("sqlite3_expired is deprecated");
|
||||
|
||||
pub const sqlite3_mprintf = @compileError("sqlite3_mprintf can't be implemented in Zig");
|
||||
pub const sqlite3_snprintf = @compileError("sqlite3_snprintf can't be implemented in Zig");
|
||||
pub const sqlite3_vmprintf = @compileError("sqlite3_vmprintf can't be implemented in Zig");
|
||||
pub const sqlite3_vsnprintf = @compileError("sqlite3_vsnprintf can't be implemented in Zig");
|
||||
pub const sqlite3_test_control = @compileError("sqlite3_test_control can't be implemented in Zig");
|
||||
pub const sqlite3_db_config = @compileError("sqlite3_db_config can't be implemented in Zig");
|
||||
pub const sqlite3_log = @compileError("sqlite3_log can't be implemented in Zig");
|
||||
pub const sqlite3_vtab_config = @compileError("sqlite3_vtab_config can't be implemented in Zig");
|
||||
pub const sqlite3_uri_vsnprintf = @compileError("sqlite3_uri_vsnprintf can't be implemented in Zig");
|
||||
pub const sqlite3_str_appendf = @compileError("sqlite3_str_appendf can't be implemented in Zig");
|
||||
pub const sqlite3_str_vappendf = @compileError("sqlite3_str_vappendf can't be implemented in Zig");
|
||||
|
||||
pub export fn sqlite3_aggregate_context(p: ?*c.sqlite3_context, nBytes: c_int) callconv(.C) ?*anyopaque {
|
||||
return sqlite3_api.*.aggregate_context.?(p, nBytes);
|
||||
}
|
||||
pub export fn sqlite3_bind_blob(pStmt: ?*c.sqlite3_stmt, i: c_int, zData: ?*const anyopaque, nData: c_int, xDel: ?*const fn (?*anyopaque) callconv(.C) void) c_int {
|
||||
return sqlite3_api.*.bind_blob.?(pStmt, i, zData, nData, xDel);
|
||||
}
|
||||
pub export fn sqlite3_bind_double(pStmt: ?*c.sqlite3_stmt, i: c_int, rValue: f64) callconv(.C) c_int {
|
||||
return sqlite3_api.*.bind_double.?(pStmt, i, rValue);
|
||||
}
|
||||
pub export fn sqlite3_bind_int(pStmt: ?*c.sqlite3_stmt, i: c_int, iValue: c_int) callconv(.C) c_int {
|
||||
return sqlite3_api.*.bind_int.?(pStmt, i, iValue);
|
||||
}
|
||||
pub export fn sqlite3_bind_int64(pStmt: ?*c.sqlite3_stmt, i: c_int, iValue: c.sqlite3_int64) c_int {
|
||||
return sqlite3_api.*.bind_int64.?(pStmt, i, iValue);
|
||||
}
|
||||
pub export fn sqlite3_bind_null(pStmt: ?*c.sqlite3_stmt, i: c_int) c_int {
|
||||
return sqlite3_api.*.bind_null.?(pStmt, i);
|
||||
}
|
||||
pub export fn sqlite3_bind_parameter_count(pStmt: ?*c.sqlite3_stmt) c_int {
|
||||
return sqlite3_api.*.bind_parameter_count.?(pStmt);
|
||||
}
|
||||
pub export fn sqlite3_bind_parameter_index(pStmt: ?*c.sqlite3_stmt, zName: [*c]const u8) c_int {
|
||||
return sqlite3_api.*.bind_parameter_index.?(pStmt, zName);
|
||||
}
|
||||
pub export fn sqlite3_bind_parameter_name(pStmt: ?*c.sqlite3_stmt, i: c_int) [*c]const u8 {
|
||||
return sqlite3_api.*.bind_parameter_name.?(pStmt, i);
|
||||
}
|
||||
pub export fn sqlite3_bind_text(pStmt: ?*c.sqlite3_stmt, i: c_int, zData: [*c]const u8, nData: c_int, xDel: ?*const fn (?*anyopaque) callconv(.C) void) c_int {
|
||||
return sqlite3_api.*.bind_text.?(pStmt, i, zData, nData, xDel);
|
||||
}
|
||||
pub export fn sqlite3_bind_text16(pStmt: ?*c.sqlite3_stmt, i: c_int, zData: ?*const anyopaque, nData: c_int, xDel: ?*const fn (?*anyopaque) callconv(.C) void) c_int {
|
||||
return sqlite3_api.*.bind_text16.?(pStmt, i, zData, nData, xDel);
|
||||
}
|
||||
pub export fn sqlite3_bind_value(pStmt: ?*c.sqlite3_stmt, i: c_int, pValue: ?*const c.sqlite3_value) c_int {
|
||||
return sqlite3_api.*.bind_value.?(pStmt, i, pValue);
|
||||
}
|
||||
pub export fn sqlite3_busy_handler(db: ?*c.sqlite3, xBusy: ?*const fn (?*anyopaque, c_int) callconv(.C) c_int, pArg: ?*anyopaque) c_int {
|
||||
return sqlite3_api.*.busy_handler.?(db, xBusy, pArg);
|
||||
}
|
||||
pub export fn sqlite3_busy_timeout(db: ?*c.sqlite3, ms: c_int) c_int {
|
||||
return sqlite3_api.*.busy_timeout.?(db, ms);
|
||||
}
|
||||
pub export fn sqlite3_changes(db: ?*c.sqlite3) c_int {
|
||||
return sqlite3_api.*.changes.?(db);
|
||||
}
|
||||
pub export fn sqlite3_close(db: ?*c.sqlite3) c_int {
|
||||
return sqlite3_api.*.close.?(db);
|
||||
}
|
||||
pub export fn sqlite3_collation_needed(db: ?*c.sqlite3, pCollNeededArg: ?*anyopaque, xCollNeeded: ?*const fn (?*anyopaque, ?*c.sqlite3, c_int, [*c]const u8) callconv(.C) void) c_int {
|
||||
return sqlite3_api.*.collation_needed.?(db, pCollNeededArg, xCollNeeded);
|
||||
}
|
||||
pub export fn sqlite3_collation_needed16(db: ?*c.sqlite3, pCollNeededArg: ?*anyopaque, xCollNeeded16: ?*const fn (?*anyopaque, ?*c.sqlite3, c_int, ?*const anyopaque) callconv(.C) void) c_int {
|
||||
return sqlite3_api.*.collation_needed16.?(db, pCollNeededArg, xCollNeeded16);
|
||||
}
|
||||
pub export fn sqlite3_column_blob(pStmt: ?*c.sqlite3_stmt, iCol: c_int) ?*const anyopaque {
|
||||
return sqlite3_api.*.column_blob.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_bytes(pStmt: ?*c.sqlite3_stmt, iCol: c_int) c_int {
|
||||
return sqlite3_api.*.column_bytes.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_bytes16(pStmt: ?*c.sqlite3_stmt, iCol: c_int) c_int {
|
||||
return sqlite3_api.*.column_bytes16.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_count(pStmt: ?*c.sqlite3_stmt) c_int {
|
||||
return sqlite3_api.*.column_count.?(pStmt);
|
||||
}
|
||||
pub export fn sqlite3_column_database_name(pStmt: ?*c.sqlite3_stmt, iCol: c_int) [*c]const u8 {
|
||||
return sqlite3_api.*.column_database_name.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_database_name16(pStmt: ?*c.sqlite3_stmt, iCol: c_int) ?*const anyopaque {
|
||||
return sqlite3_api.*.column_database_name16.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_decltype(pStmt: ?*c.sqlite3_stmt, iCol: c_int) [*c]const u8 {
|
||||
return sqlite3_api.*.column_decltype.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_decltype16(pStmt: ?*c.sqlite3_stmt, iCol: c_int) ?*const anyopaque {
|
||||
return sqlite3_api.*.column_decltype16.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_double(pStmt: ?*c.sqlite3_stmt, iCol: c_int) f64 {
|
||||
return sqlite3_api.*.column_double.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_int(pStmt: ?*c.sqlite3_stmt, iCol: c_int) c_int {
|
||||
return sqlite3_api.*.column_int.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_int64(pStmt: ?*c.sqlite3_stmt, iCol: c_int) c.sqlite3_int64 {
|
||||
return sqlite3_api.*.column_int64.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_name(pStmt: ?*c.sqlite3_stmt, N: c_int) [*c]const u8 {
|
||||
return sqlite3_api.*.column_name.?(pStmt, N);
|
||||
}
|
||||
pub export fn sqlite3_column_name16(pStmt: ?*c.sqlite3_stmt, N: c_int) ?*const anyopaque {
|
||||
return sqlite3_api.*.column_name16.?(pStmt, N);
|
||||
}
|
||||
pub export fn sqlite3_column_origin_name(pStmt: ?*c.sqlite3_stmt, N: c_int) [*c]const u8 {
|
||||
return sqlite3_api.*.column_origin_name.?(pStmt, N);
|
||||
}
|
||||
pub export fn sqlite3_column_origin_name16(pStmt: ?*c.sqlite3_stmt, N: c_int) ?*const anyopaque {
|
||||
return sqlite3_api.*.column_origin_name16.?(pStmt, N);
|
||||
}
|
||||
pub export fn sqlite3_column_table_name(pStmt: ?*c.sqlite3_stmt, N: c_int) [*c]const u8 {
|
||||
return sqlite3_api.*.column_table_name.?(pStmt, N);
|
||||
}
|
||||
pub export fn sqlite3_column_table_name16(pStmt: ?*c.sqlite3_stmt, N: c_int) ?*const anyopaque {
|
||||
return sqlite3_api.*.column_table_name16.?(pStmt, N);
|
||||
}
|
||||
pub export fn sqlite3_column_text(pStmt: ?*c.sqlite3_stmt, iCol: c_int) [*c]const u8 {
|
||||
return sqlite3_api.*.column_text.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_text16(pStmt: ?*c.sqlite3_stmt, iCol: c_int) ?*const anyopaque {
|
||||
return sqlite3_api.*.column_text16.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_type(pStmt: ?*c.sqlite3_stmt, iCol: c_int) c_int {
|
||||
return sqlite3_api.*.column_type.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_column_value(pStmt: ?*c.sqlite3_stmt, iCol: c_int) ?*c.sqlite3_value {
|
||||
return sqlite3_api.*.column_value.?(pStmt, iCol);
|
||||
}
|
||||
pub export fn sqlite3_commit_hook(db: ?*c.sqlite3, xCallback: ?*const fn (?*anyopaque) callconv(.C) c_int, pArg: ?*anyopaque) ?*anyopaque {
|
||||
return sqlite3_api.*.commit_hook.?(db, xCallback, pArg);
|
||||
}
|
||||
pub export fn sqlite3_complete(sql: [*c]const u8) c_int {
|
||||
return sqlite3_api.*.complete.?(sql);
|
||||
}
|
||||
pub export fn sqlite3_complete16(sql: ?*const anyopaque) c_int {
|
||||
return sqlite3_api.*.complete16.?(sql);
|
||||
}
|
||||
pub export fn sqlite3_create_collation(db: ?*c.sqlite3, zName: [*c]const u8, eTextRep: c_int, pArg: ?*anyopaque, xCompare: ?*const fn (?*anyopaque, c_int, ?*const anyopaque, c_int, ?*const anyopaque) callconv(.C) c_int) c_int {
|
||||
return sqlite3_api.*.create_collation.?(db, zName, eTextRep, pArg, xCompare);
|
||||
}
|
||||
pub export fn sqlite3_create_collation16(db: ?*c.sqlite3, zName: ?*const anyopaque, eTextRep: c_int, pArg: ?*anyopaque, xCompare: ?*const fn (?*anyopaque, c_int, ?*const anyopaque, c_int, ?*const anyopaque) callconv(.C) c_int) c_int {
|
||||
return sqlite3_api.*.create_collation16.?(db, zName, eTextRep, pArg, xCompare);
|
||||
}
|
||||
pub export fn sqlite3_create_function(db: ?*c.sqlite3, zFunctionName: [*c]const u8, nArg: c_int, eTextRep: c_int, pApp: ?*anyopaque, xFunc: ?*const fn (?*c.sqlite3_context, c_int, [*c]?*c.sqlite3_value) callconv(.C) void, xStep: ?*const fn (?*c.sqlite3_context, c_int, [*c]?*c.sqlite3_value) callconv(.C) void, xFinal: ?*const fn (?*c.sqlite3_context) callconv(.C) void) c_int {
|
||||
return sqlite3_api.*.create_function.?(db, zFunctionName, nArg, eTextRep, pApp, xFunc, xStep, xFinal);
|
||||
}
|
||||
pub export fn sqlite3_create_function16(db: ?*c.sqlite3, zFunctionName: ?*const anyopaque, nArg: c_int, eTextRep: c_int, pApp: ?*anyopaque, xFunc: ?*const fn (?*c.sqlite3_context, c_int, [*c]?*c.sqlite3_value) callconv(.C) void, xStep: ?*const fn (?*c.sqlite3_context, c_int, [*c]?*c.sqlite3_value) callconv(.C) void, xFinal: ?*const fn (?*c.sqlite3_context) callconv(.C) void) c_int {
|
||||
return sqlite3_api.*.create_function16.?(db, zFunctionName, nArg, eTextRep, pApp, xFunc, xStep, xFinal);
|
||||
}
|
||||
pub export fn sqlite3_create_module(db: ?*c.sqlite3, zName: [*c]const u8, pModule: [*c]const c.sqlite3_module, pAux: ?*anyopaque) c_int {
|
||||
return sqlite3_api.*.create_module.?(db, zName, pModule, pAux);
|
||||
}
|
||||
pub export fn sqlite3_create_module_v2(db: ?*c.sqlite3, zName: [*c]const u8, pModule: [*c]const c.sqlite3_module, pAux: ?*anyopaque, xDestroy: ?*const fn (?*anyopaque) callconv(.C) void) c_int {
|
||||
return sqlite3_api.*.create_module_v2.?(db, zName, pModule, pAux, xDestroy);
|
||||
}
|
||||
pub export fn sqlite3_data_count(pStmt: ?*c.sqlite3_stmt) c_int {
|
||||
return sqlite3_api.*.data_count.?(pStmt);
|
||||
}
|
||||
pub export fn sqlite3_db_handle(pStmt: ?*c.sqlite3_stmt) ?*c.sqlite3 {
|
||||
return sqlite3_api.*.db_handle.?(pStmt);
|
||||
}
|
||||
|
||||
pub export fn sqlite3_declare_vtab(db: ?*c.sqlite3, zSQL: [*c]const u8) c_int {
|
||||
return sqlite3_api.*.declare_vtab.?(db, zSQL);
|
||||
}
|
||||
pub export fn sqlite3_enable_shared_cache(enable: c_int) c_int {
|
||||
return sqlite3_api.*.enable_shared_cache.?(enable);
|
||||
}
|
||||
pub export fn sqlite3_errcode(db: ?*c.sqlite3) c_int {
|
||||
return sqlite3_api.*.errcode.?(db);
|
||||
}
|
||||
pub export fn sqlite3_errmsg(db: ?*c.sqlite3) [*c]const u8 {
|
||||
return sqlite3_api.*.errmsg.?(db);
|
||||
}
|
||||
pub export fn sqlite3_errmsg16(db: ?*c.sqlite3) ?*const anyopaque {
|
||||
return sqlite3_api.*.errmsg16.?(db);
|
||||
}
|
||||
pub export fn sqlite3_exec(db: ?*c.sqlite3, zSql: [*c]const u8, xCallback: ?*const fn (?*anyopaque, c_int, [*c][*c]u8, [*c][*c]u8) callconv(.C) c_int, pArg: ?*anyopaque, pzErrMsg: [*c][*c]u8) c_int {
|
||||
return sqlite3_api.*.exec.?(db, zSql, xCallback, pArg, pzErrMsg);
|
||||
}
|
||||
pub export fn sqlite3_finalize(pStmt: ?*c.sqlite3_stmt) c_int {
|
||||
return sqlite3_api.*.finalize.?(pStmt);
|
||||
}
|
||||
pub export fn sqlite3_free(p: ?*anyopaque) void {
|
||||
return sqlite3_api.*.free.?(p);
|
||||
}
|
||||
pub export fn sqlite3_free_table(result: [*c][*c]u8) void {
|
||||
return sqlite3_api.*.free_table.?(result);
|
||||
}
|
||||
pub export fn sqlite3_get_autocommit(db: ?*c.sqlite3) c_int {
|
||||
return sqlite3_api.*.get_autocommit.?(db);
|
||||
}
|
||||
pub export fn sqlite3_get_auxdata(pCtx: ?*c.sqlite3_context, iArg: c_int) ?*anyopaque {
|
||||
return sqlite3_api.*.get_auxdata.?(pCtx, iArg);
|
||||
}
|
||||
pub export fn sqlite3_get_table(db: ?*c.sqlite3, zSql: [*c]const u8, pazResult: [*c][*c][*c]u8, pnRow: [*c]c_int, pnColumn: [*c]c_int, pzErrMsg: [*c][*c]u8) c_int {
|
||||
return sqlite3_api.*.get_table.?(db, zSql, pazResult, pnRow, pnColumn, pzErrMsg);
|
||||
}
|
||||
pub export fn sqlite3_interrupt(db: ?*c.sqlite3) void {
|
||||
return sqlite3_api.*.interruptx.?(db);
|
||||
}
|
||||
pub export fn sqlite3_last_insert_rowid(db: ?*c.sqlite3) c.sqlite3_int64 {
|
||||
return sqlite3_api.*.last_insert_rowid.?(db);
|
||||
}
|
||||
pub export fn sqlite3_libversion() callconv(.C) [*c]const u8 {
|
||||
return sqlite3_api.*.libversion.?();
|
||||
}
|
||||
pub export fn sqlite3_libversion_number() c_int {
|
||||
return sqlite3_api.*.libversion_number.?();
|
||||
}
|
||||
pub export fn sqlite3_malloc(n: c_int) ?*anyopaque {
|
||||
return sqlite3_api.*.malloc.?(n);
|
||||
}
|
||||
pub export fn sqlite3_open(filename: [*c]const u8, ppDb: [*c]?*c.sqlite3) c_int {
|
||||
return sqlite3_api.*.open.?(filename, ppDb);
|
||||
}
|
||||
pub export fn sqlite3_open16(filename: ?*const anyopaque, ppDb: [*c]?*c.sqlite3) c_int {
|
||||
return sqlite3_api.*.open16.?(filename, ppDb);
|
||||
}
|
||||
pub export fn sqlite3_prepare(db: ?*c.sqlite3, zSql: [*c]const u8, nByte: c_int, ppStmt: [*c]?*c.sqlite3_stmt, pzTail: [*c][*c]const u8) c_int {
|
||||
return sqlite3_api.*.prepare.?(db, zSql, nByte, ppStmt, pzTail);
|
||||
}
|
||||
pub export fn sqlite3_prepare16(db: ?*c.sqlite3, zSql: ?*const anyopaque, nByte: c_int, ppStmt: [*c]?*c.sqlite3_stmt, pzTail: [*c]?*const anyopaque) c_int {
|
||||
return sqlite3_api.*.prepare16.?(db, zSql, nByte, ppStmt, pzTail);
|
||||
}
|
||||
pub export fn sqlite3_prepare_v2(db: ?*c.sqlite3, zSql: [*c]const u8, nByte: c_int, ppStmt: [*c]?*c.sqlite3_stmt, pzTail: [*c][*c]const u8) c_int {
|
||||
return sqlite3_api.*.prepare_v2.?(db, zSql, nByte, ppStmt, pzTail);
|
||||
}
|
||||
pub export fn sqlite3_prepare16_v2(db: ?*c.sqlite3, zSql: ?*const anyopaque, nByte: c_int, ppStmt: [*c]?*c.sqlite3_stmt, pzTail: [*c]?*const anyopaque) c_int {
|
||||
return sqlite3_api.*.prepare16_v2.?(db, zSql, nByte, ppStmt, pzTail);
|
||||
}
|
||||
pub export fn sqlite3_profile(db: ?*c.sqlite3, xProfile: ?*const fn (?*anyopaque, [*c]const u8, c.sqlite3_uint64) callconv(.C) void, pArg: ?*anyopaque) ?*anyopaque {
|
||||
return sqlite3_api.*.profile.?(db, xProfile, pArg);
|
||||
}
|
||||
pub export fn sqlite3_progress_handler(db: ?*c.sqlite3, nOps: c_int, xProgress: ?*const fn (?*anyopaque) callconv(.C) c_int, pArg: ?*anyopaque) void {
|
||||
return sqlite3_api.*.progress_handler.?(db, nOps, xProgress, pArg);
|
||||
}
|
||||
pub export fn sqlite3_realloc(pOld: ?*anyopaque, n: c_int) ?*anyopaque {
|
||||
return sqlite3_api.*.realloc.?(pOld, n);
|
||||
}
|
||||
pub export fn sqlite3_reset(pStmt: ?*c.sqlite3_stmt) c_int {
|
||||
return sqlite3_api.*.reset.?(pStmt);
|
||||
}
|
||||
pub export fn sqlite3_result_blob(pCtx: ?*c.sqlite3_context, z: ?*const anyopaque, n: c_int, xDel: ?*const fn (?*anyopaque) callconv(.C) void) void {
|
||||
return sqlite3_api.*.result_blob.?(pCtx, z, n, xDel);
|
||||
}
|
||||
|
||||
pub export fn sqlite3_result_double(pCtx: ?*c.sqlite3_context, rVal: f64) void {
|
||||
return sqlite3_api.*.result_double.?(pCtx, rVal);
|
||||
}
|
||||
|
||||
pub export fn sqlite3_result_error(pCtx: ?*c.sqlite3_context, z: [*c]const u8, n: c_int) void {
|
||||
return sqlite3_api.*.result_error.?(pCtx, z, n);
|
||||
}
|
||||
pub export fn sqlite3_result_error16(pCtx: ?*c.sqlite3_context, z: ?*const anyopaque, n: c_int) void {
|
||||
return sqlite3_api.*.result_error16.?(pCtx, z, n);
|
||||
}
|
||||
pub export fn sqlite3_result_int(pCtx: ?*c.sqlite3_context, iVal: c_int) void {
|
||||
return sqlite3_api.*.result_int.?(pCtx, iVal);
|
||||
}
|
||||
pub export fn sqlite3_result_int64(pCtx: ?*c.sqlite3_context, iVal: c.sqlite3_int64) void {
|
||||
return sqlite3_api.*.result_int64.?(pCtx, iVal);
|
||||
}
|
||||
pub export fn sqlite3_result_null(pCtx: ?*c.sqlite3_context) void {
|
||||
return sqlite3_api.*.result_null.?(pCtx);
|
||||
}
|
||||
pub export fn sqlite3_result_text(pCtx: ?*c.sqlite3_context, z: [*c]const u8, n: c_int, xDel: ?*const fn (?*anyopaque) callconv(.C) void) void {
|
||||
return sqlite3_api.*.result_text.?(pCtx, z, n, xDel);
|
||||
}
|
||||
pub export fn sqlite3_result_text16(pCtx: ?*c.sqlite3_context, z: ?*const anyopaque, n: c_int, xDel: ?*const fn (?*anyopaque) callconv(.C) void) void {
|
||||
return sqlite3_api.*.result_text16.?(pCtx, z, n, xDel);
|
||||
}
|
||||
pub export fn sqlite3_result_text16be(pCtx: ?*c.sqlite3_context, z: ?*const anyopaque, n: c_int, xDel: ?*const fn (?*anyopaque) callconv(.C) void) void {
|
||||
return sqlite3_api.*.result_text16be.?(pCtx, z, n, xDel);
|
||||
}
|
||||
pub export fn sqlite3_result_text16le(pCtx: ?*c.sqlite3_context, z: ?*const anyopaque, n: c_int, xDel: ?*const fn (?*anyopaque) callconv(.C) void) void {
|
||||
return sqlite3_api.*.result_text16le.?(pCtx, z, n, xDel);
|
||||
}
|
||||
pub export fn sqlite3_result_value(pCtx: ?*c.sqlite3_context, pValue: ?*c.sqlite3_value) void {
|
||||
return sqlite3_api.*.result_value.?(pCtx, pValue);
|
||||
}
|
||||
pub export fn sqlite3_rollback_hook(db: ?*c.sqlite3, xCallback: ?*const fn (?*anyopaque) callconv(.C) void, pArg: ?*anyopaque) ?*anyopaque {
|
||||
return sqlite3_api.*.rollback_hook.?(db, xCallback, pArg);
|
||||
}
|
||||
pub export fn sqlite3_set_authorizer(db: ?*c.sqlite3, xAuth: ?*const fn (?*anyopaque, c_int, [*c]const u8, [*c]const u8, [*c]const u8, [*c]const u8) callconv(.C) c_int, pArg: ?*anyopaque) c_int {
|
||||
return sqlite3_api.*.set_authorizer.?(db, xAuth, pArg);
|
||||
}
|
||||
pub export fn sqlite3_set_auxdata(pCtx: ?*c.sqlite3_context, iArg: c_int, pAux: ?*anyopaque, xDelete: ?*const fn (?*anyopaque) callconv(.C) void) void {
|
||||
return sqlite3_api.*.set_auxdata.?(pCtx, iArg, pAux, xDelete);
|
||||
}
|
||||
pub export fn sqlite3_step(pStmt: ?*c.sqlite3_stmt) c_int {
|
||||
return sqlite3_api.*.step.?(pStmt);
|
||||
}
|
||||
pub export fn sqlite3_table_column_metadata(db: ?*c.sqlite3, zDbName: [*c]const u8, zTableName: [*c]const u8, zColumnName: [*c]const u8, pzDataType: [*c][*c]const u8, pzCollSeq: [*c][*c]const u8, pNotNull: [*c]c_int, pPrimaryKey: [*c]c_int, pAutoinc: [*c]c_int) c_int {
|
||||
return sqlite3_api.*.table_column_metadata.?(db, zDbName, zTableName, zColumnName, pzDataType, pzCollSeq, pNotNull, pPrimaryKey, pAutoinc);
|
||||
}
|
||||
pub export fn sqlite3_thread_cleanup() void {
|
||||
return sqlite3_api.*.thread_cleanup.?();
|
||||
}
|
||||
pub export fn sqlite3_total_changes(db: ?*c.sqlite3) c_int {
|
||||
return sqlite3_api.*.total_changes.?(db);
|
||||
}
|
||||
pub export fn sqlite3_trace(db: ?*c.sqlite3, xTrace: ?*const fn (?*anyopaque, [*c]const u8) callconv(.C) void, pArg: ?*anyopaque) ?*anyopaque {
|
||||
return sqlite3_api.*.trace.?(db, xTrace, pArg);
|
||||
}
|
||||
pub export fn sqlite3_update_hook(db: ?*c.sqlite3, xCallback: ?*const fn (?*anyopaque, c_int, [*c]const u8, [*c]const u8, c.sqlite3_int64) callconv(.C) void, pArg: ?*anyopaque) ?*anyopaque {
|
||||
return sqlite3_api.*.update_hook.?(db, xCallback, pArg);
|
||||
}
|
||||
pub export fn sqlite3_user_data(pCtx: ?*c.sqlite3_context) ?*anyopaque {
|
||||
return sqlite3_api.*.user_data.?(pCtx);
|
||||
}
|
||||
pub export fn sqlite3_value_blob(pVal: ?*c.sqlite3_value) ?*const anyopaque {
|
||||
return sqlite3_api.*.value_blob.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_value_bytes(pVal: ?*c.sqlite3_value) c_int {
|
||||
return sqlite3_api.*.value_bytes.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_value_bytes16(pVal: ?*c.sqlite3_value) c_int {
|
||||
return sqlite3_api.*.value_bytes16.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_value_double(pVal: ?*c.sqlite3_value) f64 {
|
||||
return sqlite3_api.*.value_double.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_value_int(pVal: ?*c.sqlite3_value) c_int {
|
||||
return sqlite3_api.*.value_int.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_value_int64(pVal: ?*c.sqlite3_value) c.sqlite3_int64 {
|
||||
return sqlite3_api.*.value_int64.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_value_numeric_type(pVal: ?*c.sqlite3_value) c_int {
|
||||
return sqlite3_api.*.value_numeric_type.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_value_text(pVal: ?*c.sqlite3_value) [*c]const u8 {
|
||||
return sqlite3_api.*.value_text.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_value_text16(pVal: ?*c.sqlite3_value) ?*const anyopaque {
|
||||
return sqlite3_api.*.value_text16.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_value_text16be(pVal: ?*c.sqlite3_value) ?*const anyopaque {
|
||||
return sqlite3_api.*.value_text16be.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_value_text16le(pVal: ?*c.sqlite3_value) ?*const anyopaque {
|
||||
return sqlite3_api.*.value_text16le.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_value_type(pVal: ?*c.sqlite3_value) c_int {
|
||||
return sqlite3_api.*.value_type.?(pVal);
|
||||
}
|
||||
pub export fn sqlite3_overload_function(db: ?*c.sqlite3, zFuncName: [*c]const u8, nArg: c_int) c_int {
|
||||
return sqlite3_api.*.overload_function.?(db, zFuncName, nArg);
|
||||
}
|
||||
pub export fn sqlite3_clear_bindings(pStmt: ?*c.sqlite3_stmt) c_int {
|
||||
return sqlite3_api.*.clear_bindings.?(pStmt);
|
||||
}
|
||||
pub export fn sqlite3_bind_zeroblob(pStmt: ?*c.sqlite3_stmt, i: c_int, n: c_int) c_int {
|
||||
return sqlite3_api.*.bind_zeroblob.?(pStmt, i, n);
|
||||
}
|
||||
pub export fn sqlite3_blob_bytes(pBlob: ?*c.sqlite3_blob) c_int {
|
||||
return sqlite3_api.*.blob_bytes.?(pBlob);
|
||||
}
|
||||
pub export fn sqlite3_blob_close(pBlob: ?*c.sqlite3_blob) c_int {
|
||||
return sqlite3_api.*.blob_close.?(pBlob);
|
||||
}
|
||||
pub export fn sqlite3_blob_open(db: ?*c.sqlite3, zDb: [*c]const u8, zTable: [*c]const u8, zColumn: [*c]const u8, iRow: c.sqlite3_int64, flags: c_int, ppBlob: [*c]?*c.sqlite3_blob) c_int {
|
||||
return sqlite3_api.*.blob_open.?(db, zDb, zTable, zColumn, iRow, flags, ppBlob);
|
||||
}
|
||||
pub export fn sqlite3_blob_read(pBlob: ?*c.sqlite3_blob, z: ?*anyopaque, n: c_int, iOffset: c_int) c_int {
|
||||
return sqlite3_api.*.blob_read.?(pBlob, z, n, iOffset);
|
||||
}
|
||||
pub export fn sqlite3_blob_write(pBlob: ?*c.sqlite3_blob, z: ?*const anyopaque, n: c_int, iOffset: c_int) c_int {
|
||||
return sqlite3_api.*.blob_write.?(pBlob, z, n, iOffset);
|
||||
}
|
||||
pub export fn sqlite3_create_collation_v2(db: ?*c.sqlite3, zName: [*c]const u8, eTextRep: c_int, pCtx: ?*anyopaque, xCompare: ?*const fn (?*anyopaque, c_int, ?*const anyopaque, c_int, ?*const anyopaque) callconv(.C) c_int, xDel: ?*const fn (?*anyopaque) callconv(.C) void) c_int {
|
||||
return sqlite3_api.*.create_collation_v2.?(db, zName, eTextRep, pCtx, xCompare, xDel);
|
||||
}
|
||||
pub export fn sqlite3_file_control(db: ?*c.sqlite3, zDbName: [*c]const u8, op: c_int, pArg: ?*anyopaque) c_int {
|
||||
return sqlite3_api.*.file_control.?(db, zDbName, op, pArg);
|
||||
}
|
||||
pub export fn sqlite3_memory_highwater(resetFlag: c_int) c.sqlite3_int64 {
|
||||
return sqlite3_api.*.memory_highwater.?(resetFlag);
|
||||
}
|
||||
pub export fn sqlite3_memory_used() c.sqlite3_int64 {
|
||||
return sqlite3_api.*.memory_used.?();
|
||||
}
|
||||
pub export fn sqlite3_mutex_alloc(id: c_int) ?*c.sqlite3_mutex {
|
||||
return sqlite3_api.*.mutex_alloc.?(id);
|
||||
}
|
||||
pub export fn sqlite3_mutex_enter(p: ?*c.sqlite3_mutex) void {
|
||||
return sqlite3_api.*.mutex_enter.?(p);
|
||||
}
|
||||
pub export fn sqlite3_mutex_free(p: ?*c.sqlite3_mutex) void {
|
||||
return sqlite3_api.*.mutex_free.?(p);
|
||||
}
|
||||
pub export fn sqlite3_mutex_leave(p: ?*c.sqlite3_mutex) void {
|
||||
return sqlite3_api.*.mutex_leave.?(p);
|
||||
}
|
||||
pub export fn sqlite3_mutex_try(p: ?*c.sqlite3_mutex) c_int {
|
||||
return sqlite3_api.*.mutex_try.?(p);
|
||||
}
|
||||
pub export fn sqlite3_open_v2(filename: [*c]const u8, ppDb: [*c]?*c.sqlite3, flags: c_int, zVfs: [*c]const u8) c_int {
|
||||
return sqlite3_api.*.open_v2.?(filename, ppDb, flags, zVfs);
|
||||
}
|
||||
pub export fn sqlite3_release_memory(n: c_int) c_int {
|
||||
return sqlite3_api.*.release_memory.?(n);
|
||||
}
|
||||
pub export fn sqlite3_result_error_nomem(pCtx: ?*c.sqlite3_context) void {
|
||||
return sqlite3_api.*.result_error_nomem.?(pCtx);
|
||||
}
|
||||
pub export fn sqlite3_result_error_toobig(pCtx: ?*c.sqlite3_context) void {
|
||||
return sqlite3_api.*.result_error_toobig.?(pCtx);
|
||||
}
|
||||
pub export fn sqlite3_sleep(ms: c_int) c_int {
|
||||
return sqlite3_api.*.sleep.?(ms);
|
||||
}
|
||||
pub export fn sqlite3_soft_heap_limit(n: c_int) void {
|
||||
return sqlite3_api.*.soft_heap_limit.?(n);
|
||||
}
|
||||
pub export fn sqlite3_vfs_find(zVfsName: [*c]const u8) [*c]c.sqlite3_vfs {
|
||||
return sqlite3_api.*.vfs_find.?(zVfsName);
|
||||
}
|
||||
pub export fn sqlite3_vfs_register(pVfs: [*c]c.sqlite3_vfs, makeDflt: c_int) c_int {
|
||||
return sqlite3_api.*.vfs_register.?(pVfs, makeDflt);
|
||||
}
|
||||
pub export fn sqlite3_vfs_unregister(pVfs: [*c]c.sqlite3_vfs) c_int {
|
||||
return sqlite3_api.*.vfs_unregister.?(pVfs);
|
||||
}
|
||||
pub export fn sqlite3_threadsafe() c_int {
|
||||
return sqlite3_api.*.xthreadsafe.?();
|
||||
}
|
||||
pub export fn sqlite3_result_zeroblob(pCtx: ?*c.sqlite3_context, n: c_int) void {
|
||||
return sqlite3_api.*.result_zeroblob.?(pCtx, n);
|
||||
}
|
||||
pub export fn sqlite3_result_error_code(pCtx: ?*c.sqlite3_context, errCode: c_int) void {
|
||||
return sqlite3_api.*.result_error_code.?(pCtx, errCode);
|
||||
}
|
||||
pub export fn sqlite3_randomness(N: c_int, pBuf: ?*anyopaque) void {
|
||||
return sqlite3_api.*.randomness.?(N, pBuf);
|
||||
}
|
||||
// Handle/status/backup shims forwarded through the `sqlite3_api` routine table.

pub export fn sqlite3_context_db_handle(pCtx: ?*c.sqlite3_context) ?*c.sqlite3 {
    const impl = sqlite3_api.*.context_db_handle.?;
    return impl(pCtx);
}

pub export fn sqlite3_extended_result_codes(pCtx: ?*c.sqlite3, onoff: c_int) c_int {
    const impl = sqlite3_api.*.extended_result_codes.?;
    return impl(pCtx, onoff);
}

pub export fn sqlite3_limit(db: ?*c.sqlite3, id: c_int, newVal: c_int) c_int {
    const impl = sqlite3_api.*.limit.?;
    return impl(db, id, newVal);
}

pub export fn sqlite3_next_stmt(pDb: ?*c.sqlite3, pStmt: ?*c.sqlite3_stmt) ?*c.sqlite3_stmt {
    const impl = sqlite3_api.*.next_stmt.?;
    return impl(pDb, pStmt);
}

pub export fn sqlite3_sql(pStmt: ?*c.sqlite3_stmt) [*c]const u8 {
    const impl = sqlite3_api.*.sql.?;
    return impl(pStmt);
}

pub export fn sqlite3_status(op: c_int, pCurrent: [*c]c_int, pHighwater: [*c]c_int, resetFlag: c_int) c_int {
    const impl = sqlite3_api.*.status.?;
    return impl(op, pCurrent, pHighwater, resetFlag);
}

pub export fn sqlite3_backup_finish(p: ?*c.sqlite3_backup) c_int {
    const impl = sqlite3_api.*.backup_finish.?;
    return impl(p);
}

pub export fn sqlite3_backup_init(pDest: ?*c.sqlite3, zDestName: [*c]const u8, pSource: ?*c.sqlite3, zSourceName: [*c]const u8) ?*c.sqlite3_backup {
    const impl = sqlite3_api.*.backup_init.?;
    return impl(pDest, zDestName, pSource, zSourceName);
}

pub export fn sqlite3_backup_pagecount(p: ?*c.sqlite3_backup) c_int {
    const impl = sqlite3_api.*.backup_pagecount.?;
    return impl(p);
}

pub export fn sqlite3_backup_remaining(p: ?*c.sqlite3_backup) c_int {
    const impl = sqlite3_api.*.backup_remaining.?;
    return impl(p);
}

pub export fn sqlite3_backup_step(p: ?*c.sqlite3_backup, nPage: c_int) c_int {
    const impl = sqlite3_api.*.backup_step.?;
    return impl(p, nPage);
}
|
||||
// Compile-option queries and the v2 scalar/aggregate function registrar,
// forwarded through the `sqlite3_api` routine table.

pub export fn sqlite3_compileoption_get(N: c_int) [*c]const u8 {
    const impl = sqlite3_api.*.compileoption_get.?;
    return impl(N);
}

pub export fn sqlite3_compileoption_used(zOptName: [*c]const u8) c_int {
    const impl = sqlite3_api.*.compileoption_used.?;
    return impl(zOptName);
}

pub export fn sqlite3_create_function_v2(
    db: ?*c.sqlite3,
    zFunctionName: [*c]const u8,
    nArg: c_int,
    eTextRep: c_int,
    pApp: ?*anyopaque,
    xFunc: ?*const fn (?*c.sqlite3_context, c_int, [*c]?*c.sqlite3_value) callconv(.C) void,
    xStep: ?*const fn (?*c.sqlite3_context, c_int, [*c]?*c.sqlite3_value) callconv(.C) void,
    xFinal: ?*const fn (?*c.sqlite3_context) callconv(.C) void,
    xDestroy: ?*const fn (?*anyopaque) callconv(.C) void,
) c_int {
    const impl = sqlite3_api.*.create_function_v2.?;
    return impl(db, zFunctionName, nArg, eTextRep, pApp, xFunc, xStep, xFinal, xDestroy);
}
|
||||
// Per-connection status, WAL, and unlock-notify shims forwarded through the
// `sqlite3_api` routine table.

pub export fn sqlite3_db_mutex(db: ?*c.sqlite3) ?*c.sqlite3_mutex {
    const impl = sqlite3_api.*.db_mutex.?;
    return impl(db);
}

pub export fn sqlite3_db_status(db: ?*c.sqlite3, op: c_int, pCurrent: [*c]c_int, pHighwater: [*c]c_int, resetFlag: c_int) c_int {
    const impl = sqlite3_api.*.db_status.?;
    return impl(db, op, pCurrent, pHighwater, resetFlag);
}

pub export fn sqlite3_extended_errcode(db: ?*c.sqlite3) c_int {
    const impl = sqlite3_api.*.extended_errcode.?;
    return impl(db);
}

pub export fn sqlite3_soft_heap_limit64(N: c.sqlite3_int64) c.sqlite3_int64 {
    const impl = sqlite3_api.*.soft_heap_limit64.?;
    return impl(N);
}

pub export fn sqlite3_sourceid() [*c]const u8 {
    const impl = sqlite3_api.*.sourceid.?;
    return impl();
}

pub export fn sqlite3_stmt_status(pStmt: ?*c.sqlite3_stmt, op: c_int, resetFlag: c_int) c_int {
    const impl = sqlite3_api.*.stmt_status.?;
    return impl(pStmt, op, resetFlag);
}

pub export fn sqlite3_strnicmp(zLeft: [*c]const u8, zRight: [*c]const u8, N: c_int) c_int {
    const impl = sqlite3_api.*.strnicmp.?;
    return impl(zLeft, zRight, N);
}

pub export fn sqlite3_unlock_notify(pBlocked: ?*c.sqlite3, xNotify: ?*const fn ([*c]?*anyopaque, c_int) callconv(.C) void, pNotifyArg: ?*anyopaque) c_int {
    const impl = sqlite3_api.*.unlock_notify.?;
    return impl(pBlocked, xNotify, pNotifyArg);
}

pub export fn sqlite3_wal_autocheckpoint(db: ?*c.sqlite3, N: c_int) c_int {
    const impl = sqlite3_api.*.wal_autocheckpoint.?;
    return impl(db, N);
}

pub export fn sqlite3_wal_checkpoint(db: ?*c.sqlite3, zDb: [*c]const u8) c_int {
    const impl = sqlite3_api.*.wal_checkpoint.?;
    return impl(db, zDb);
}

pub export fn sqlite3_wal_hook(db: ?*c.sqlite3, xCallback: ?*const fn (?*anyopaque, ?*c.sqlite3, [*c]const u8, c_int) callconv(.C) c_int, pArg: ?*anyopaque) ?*anyopaque {
    const impl = sqlite3_api.*.wal_hook.?;
    return impl(db, xCallback, pArg);
}
|
||||
// Blob, connection-introspection, URI, and WAL-checkpoint-v2 shims forwarded
// through the `sqlite3_api` routine table.

pub export fn sqlite3_blob_reopen(pBlob: ?*c.sqlite3_blob, iRow: c.sqlite3_int64) c_int {
    const impl = sqlite3_api.*.blob_reopen.?;
    return impl(pBlob, iRow);
}

pub export fn sqlite3_vtab_on_conflict(db: ?*c.sqlite3) c_int {
    const impl = sqlite3_api.*.vtab_on_conflict.?;
    return impl(db);
}

pub export fn sqlite3_close_v2(db: ?*c.sqlite3) c_int {
    const impl = sqlite3_api.*.close_v2.?;
    return impl(db);
}

pub export fn sqlite3_db_filename(db: ?*c.sqlite3, zDbName: [*c]const u8) [*c]const u8 {
    const impl = sqlite3_api.*.db_filename.?;
    return impl(db, zDbName);
}

pub export fn sqlite3_db_readonly(db: ?*c.sqlite3, zDbName: [*c]const u8) c_int {
    const impl = sqlite3_api.*.db_readonly.?;
    return impl(db, zDbName);
}

pub export fn sqlite3_db_release_memory(db: ?*c.sqlite3) c_int {
    const impl = sqlite3_api.*.db_release_memory.?;
    return impl(db);
}

pub export fn sqlite3_errstr(rc: c_int) [*c]const u8 {
    const impl = sqlite3_api.*.errstr.?;
    return impl(rc);
}

pub export fn sqlite3_stmt_busy(pStmt: ?*c.sqlite3_stmt) c_int {
    const impl = sqlite3_api.*.stmt_busy.?;
    return impl(pStmt);
}

pub export fn sqlite3_stmt_readonly(pStmt: ?*c.sqlite3_stmt) c_int {
    const impl = sqlite3_api.*.stmt_readonly.?;
    return impl(pStmt);
}

pub export fn sqlite3_stricmp(zLeft: [*c]const u8, zRight: [*c]const u8) c_int {
    const impl = sqlite3_api.*.stricmp.?;
    return impl(zLeft, zRight);
}

pub export fn sqlite3_uri_boolean(zFile: [*c]const u8, zParam: [*c]const u8, bDefault: c_int) c_int {
    const impl = sqlite3_api.*.uri_boolean.?;
    return impl(zFile, zParam, bDefault);
}

pub export fn sqlite3_uri_int64(zFilename: [*c]const u8, zParam: [*c]const u8, bDflt: c.sqlite3_int64) c.sqlite3_int64 {
    const impl = sqlite3_api.*.uri_int64.?;
    return impl(zFilename, zParam, bDflt);
}

pub export fn sqlite3_uri_parameter(zFilename: [*c]const u8, zParam: [*c]const u8) [*c]const u8 {
    const impl = sqlite3_api.*.uri_parameter.?;
    return impl(zFilename, zParam);
}

pub export fn sqlite3_wal_checkpoint_v2(db: ?*c.sqlite3, zDb: [*c]const u8, eMode: c_int, pnLog: [*c]c_int, pnCkpt: [*c]c_int) c_int {
    const impl = sqlite3_api.*.wal_checkpoint_v2.?;
    return impl(db, zDb, eMode, pnLog, pnCkpt);
}
|
||||
// Auto-extension and 64-bit allocation/bind/result shims forwarded through
// the `sqlite3_api` routine table.

pub export fn sqlite3_auto_extension(xEntryPoint: ?*const fn () callconv(.C) void) c_int {
    const impl = sqlite3_api.*.auto_extension.?;
    return impl(xEntryPoint);
}

pub export fn sqlite3_bind_blob64(pStmt: ?*c.sqlite3_stmt, i: c_int, zData: ?*const anyopaque, nData: c.sqlite3_uint64, xDel: ?*const fn (?*anyopaque) callconv(.C) void) c_int {
    const impl = sqlite3_api.*.bind_blob64.?;
    return impl(pStmt, i, zData, nData, xDel);
}

pub export fn sqlite3_bind_text64(pStmt: ?*c.sqlite3_stmt, i: c_int, zData: [*c]const u8, nData: c.sqlite3_uint64, xDel: ?*const fn (?*anyopaque) callconv(.C) void, encoding: u8) c_int {
    const impl = sqlite3_api.*.bind_text64.?;
    return impl(pStmt, i, zData, nData, xDel, encoding);
}

pub export fn sqlite3_cancel_auto_extension(xEntryPoint: ?*const fn () callconv(.C) void) c_int {
    const impl = sqlite3_api.*.cancel_auto_extension.?;
    return impl(xEntryPoint);
}

pub export fn sqlite3_load_extension(db: ?*c.sqlite3, zFile: [*c]const u8, zProc: [*c]const u8, pzErrMsg: [*c][*c]u8) c_int {
    const impl = sqlite3_api.*.load_extension.?;
    return impl(db, zFile, zProc, pzErrMsg);
}

pub export fn sqlite3_malloc64(n: c.sqlite3_uint64) ?*anyopaque {
    const impl = sqlite3_api.*.malloc64.?;
    return impl(n);
}

pub export fn sqlite3_msize(p: ?*anyopaque) c.sqlite3_uint64 {
    const impl = sqlite3_api.*.msize.?;
    return impl(p);
}

pub export fn sqlite3_realloc64(pOld: ?*anyopaque, n: c.sqlite3_uint64) ?*anyopaque {
    const impl = sqlite3_api.*.realloc64.?;
    return impl(pOld, n);
}

pub export fn sqlite3_reset_auto_extension() void {
    const impl = sqlite3_api.*.reset_auto_extension.?;
    impl();
}

pub export fn sqlite3_result_blob64(pCtx: ?*c.sqlite3_context, z: ?*const anyopaque, n: c.sqlite3_uint64, xDel: ?*const fn (?*anyopaque) callconv(.C) void) void {
    const impl = sqlite3_api.*.result_blob64.?;
    impl(pCtx, z, n, xDel);
}

pub export fn sqlite3_result_text64(pCtx: ?*c.sqlite3_context, z: [*c]const u8, n: c.sqlite3_uint64, xDel: ?*const fn (?*anyopaque) callconv(.C) void, encoding: u8) void {
    const impl = sqlite3_api.*.result_text64.?;
    impl(pCtx, z, n, xDel, encoding);
}

pub export fn sqlite3_strglob(zGlob: [*c]const u8, zStr: [*c]const u8) c_int {
    const impl = sqlite3_api.*.strglob.?;
    return impl(zGlob, zStr);
}
|
||||
// Value lifetime/subtype, 64-bit status, and trace shims forwarded through
// the `sqlite3_api` routine table.

pub export fn sqlite3_value_dup(pOrig: ?*const c.sqlite3_value) ?*c.sqlite3_value {
    const impl = sqlite3_api.*.value_dup.?;
    return impl(pOrig);
}

pub export fn sqlite3_value_free(pOld: ?*c.sqlite3_value) void {
    const impl = sqlite3_api.*.value_free.?;
    impl(pOld);
}

pub export fn sqlite3_result_zeroblob64(pCtx: ?*c.sqlite3_context, n: c.sqlite3_uint64) c_int {
    const impl = sqlite3_api.*.result_zeroblob64.?;
    return impl(pCtx, n);
}

pub export fn sqlite3_bind_zeroblob64(pStmt: ?*c.sqlite3_stmt, i: c_int, n: c.sqlite3_uint64) c_int {
    const impl = sqlite3_api.*.bind_zeroblob64.?;
    return impl(pStmt, i, n);
}

pub export fn sqlite3_value_subtype(pVal: ?*c.sqlite3_value) c_uint {
    const impl = sqlite3_api.*.value_subtype.?;
    return impl(pVal);
}

pub export fn sqlite3_result_subtype(pCtx: ?*c.sqlite3_context, eSubtype: c_uint) void {
    const impl = sqlite3_api.*.result_subtype.?;
    impl(pCtx, eSubtype);
}

pub export fn sqlite3_status64(op: c_int, pCurrent: [*c]c.sqlite3_int64, pHighwater: [*c]c.sqlite3_int64, resetFlag: c_int) c_int {
    const impl = sqlite3_api.*.status64.?;
    return impl(op, pCurrent, pHighwater, resetFlag);
}

pub export fn sqlite3_strlike(zGlob: [*c]const u8, zStr: [*c]const u8, cEsc: c_uint) c_int {
    const impl = sqlite3_api.*.strlike.?;
    return impl(zGlob, zStr, cEsc);
}

pub export fn sqlite3_db_cacheflush(db: ?*c.sqlite3) c_int {
    const impl = sqlite3_api.*.db_cacheflush.?;
    return impl(db);
}

pub export fn sqlite3_system_errno(db: ?*c.sqlite3) c_int {
    const impl = sqlite3_api.*.system_errno.?;
    return impl(db);
}

pub export fn sqlite3_trace_v2(db: ?*c.sqlite3, uMask: c_uint, xCallback: ?*const fn (c_uint, ?*anyopaque, ?*anyopaque, ?*anyopaque) callconv(.C) c_int, pCtx: ?*anyopaque) c_int {
    const impl = sqlite3_api.*.trace_v2.?;
    return impl(db, uMask, xCallback, pCtx);
}

pub export fn sqlite3_expanded_sql(pStmt: ?*c.sqlite3_stmt) [*c]u8 {
    const impl = sqlite3_api.*.expanded_sql.?;
    return impl(pStmt);
}
|
||||
// prepare_v3 and pointer-passing-interface shims forwarded through the
// `sqlite3_api` routine table.

pub export fn sqlite3_set_last_insert_rowid(db: ?*c.sqlite3, iRowid: c.sqlite3_int64) void {
    const impl = sqlite3_api.*.set_last_insert_rowid.?;
    impl(db, iRowid);
}

pub export fn sqlite3_prepare_v3(db: ?*c.sqlite3, zSql: [*c]const u8, nByte: c_int, prepFlags: c_uint, ppStmt: [*c]?*c.sqlite3_stmt, pzTail: [*c][*c]const u8) c_int {
    const impl = sqlite3_api.*.prepare_v3.?;
    return impl(db, zSql, nByte, prepFlags, ppStmt, pzTail);
}

pub export fn sqlite3_prepare16_v3(db: ?*c.sqlite3, zSql: ?*const anyopaque, nByte: c_int, prepFlags: c_uint, ppStmt: [*c]?*c.sqlite3_stmt, pzTail: [*c]?*const anyopaque) c_int {
    const impl = sqlite3_api.*.prepare16_v3.?;
    return impl(db, zSql, nByte, prepFlags, ppStmt, pzTail);
}

pub export fn sqlite3_bind_pointer(pStmt: ?*c.sqlite3_stmt, i: c_int, pPtr: ?*anyopaque, zPTtype: [*c]const u8, xDestructor: ?*const fn (?*anyopaque) callconv(.C) void) c_int {
    const impl = sqlite3_api.*.bind_pointer.?;
    return impl(pStmt, i, pPtr, zPTtype, xDestructor);
}

pub export fn sqlite3_result_pointer(pCtx: ?*c.sqlite3_context, pPtr: ?*anyopaque, zPType: [*c]const u8, xDestructor: ?*const fn (?*anyopaque) callconv(.C) void) void {
    const impl = sqlite3_api.*.result_pointer.?;
    impl(pCtx, pPtr, zPType, xDestructor);
}

pub export fn sqlite3_value_pointer(pVal: ?*c.sqlite3_value, zPType: [*c]const u8) ?*anyopaque {
    const impl = sqlite3_api.*.value_pointer.?;
    return impl(pVal, zPType);
}

pub export fn sqlite3_vtab_nochange(pCtx: ?*c.sqlite3_context) c_int {
    const impl = sqlite3_api.*.vtab_nochange.?;
    return impl(pCtx);
}

pub export fn sqlite3_value_nochange(pVal: ?*c.sqlite3_value) c_int {
    const impl = sqlite3_api.*.value_nochange.?;
    return impl(pVal);
}

pub export fn sqlite3_vtab_collation(pIdxInfo: [*c]c.sqlite3_index_info, iCons: c_int) [*c]const u8 {
    const impl = sqlite3_api.*.vtab_collation.?;
    return impl(pIdxInfo, iCons);
}
|
||||
// Keyword-table and `sqlite3_str` string-builder shims forwarded through the
// `sqlite3_api` routine table.

pub export fn sqlite3_keyword_count() c_int {
    const impl = sqlite3_api.*.keyword_count.?;
    return impl();
}

pub export fn sqlite3_keyword_name(i: c_int, pzName: [*c][*c]const u8, pnName: [*c]c_int) c_int {
    const impl = sqlite3_api.*.keyword_name.?;
    return impl(i, pzName, pnName);
}

pub export fn sqlite3_keyword_check(zName: [*c]const u8, nName: c_int) c_int {
    const impl = sqlite3_api.*.keyword_check.?;
    return impl(zName, nName);
}

pub export fn sqlite3_str_new(db: ?*c.sqlite3) ?*c.sqlite3_str {
    const impl = sqlite3_api.*.str_new.?;
    return impl(db);
}

pub export fn sqlite3_str_finish(p: ?*c.sqlite3_str) [*c]u8 {
    const impl = sqlite3_api.*.str_finish.?;
    return impl(p);
}

pub export fn sqlite3_str_append(p: ?*c.sqlite3_str, zIn: [*c]const u8, N: c_int) void {
    const impl = sqlite3_api.*.str_append.?;
    impl(p, zIn, N);
}

pub export fn sqlite3_str_appendall(p: ?*c.sqlite3_str, zIn: [*c]const u8) void {
    const impl = sqlite3_api.*.str_appendall.?;
    impl(p, zIn);
}

pub export fn sqlite3_str_appendchar(p: ?*c.sqlite3_str, N: c_int, C: u8) void {
    const impl = sqlite3_api.*.str_appendchar.?;
    impl(p, N, C);
}

pub export fn sqlite3_str_reset(p: ?*c.sqlite3_str) void {
    const impl = sqlite3_api.*.str_reset.?;
    impl(p);
}

pub export fn sqlite3_str_errcode(p: ?*c.sqlite3_str) c_int {
    const impl = sqlite3_api.*.str_errcode.?;
    return impl(p);
}

pub export fn sqlite3_str_length(p: ?*c.sqlite3_str) c_int {
    const impl = sqlite3_api.*.str_length.?;
    return impl(p);
}

pub export fn sqlite3_str_value(p: ?*c.sqlite3_str) [*c]u8 {
    const impl = sqlite3_api.*.str_value.?;
    return impl(p);
}
|
||||
/// Registers a window function by forwarding all callbacks to the host's
/// `create_window_function` entry in the `sqlite3_api` routine table.
pub export fn sqlite3_create_window_function(
    db: ?*c.sqlite3,
    zFunctionName: [*c]const u8,
    nArg: c_int,
    eTextRep: c_int,
    pArg: ?*anyopaque,
    xStep: ?*const fn (?*c.sqlite3_context, c_int, [*c]?*c.sqlite3_value) callconv(.C) void,
    xFinal: ?*const fn (?*c.sqlite3_context) callconv(.C) void,
    xValue: ?*const fn (?*c.sqlite3_context) callconv(.C) void,
    xInverse: ?*const fn (?*c.sqlite3_context, c_int, [*c]?*c.sqlite3_value) callconv(.C) void,
    xDestroy: ?*const fn (?*anyopaque) callconv(.C) void,
) c_int {
    const impl = sqlite3_api.*.create_window_function.?;
    return impl(db, zFunctionName, nArg, eTextRep, pArg, xStep, xFinal, xValue, xInverse, xDestroy);
}
|
||||
// Statement introspection, filename-helper, and transaction-state shims
// forwarded through the `sqlite3_api` routine table.

pub export fn sqlite3_stmt_isexplain(pStmt: ?*c.sqlite3_stmt) c_int {
    const impl = sqlite3_api.*.stmt_isexplain.?;
    return impl(pStmt);
}

pub export fn sqlite3_value_frombind(pVal: ?*c.sqlite3_value) c_int {
    const impl = sqlite3_api.*.value_frombind.?;
    return impl(pVal);
}

pub export fn sqlite3_drop_modules(db: ?*c.sqlite3, azKeep: [*c][*c]const u8) c_int {
    const impl = sqlite3_api.*.drop_modules.?;
    return impl(db, azKeep);
}

pub export fn sqlite3_hard_heap_limit64(N: c.sqlite3_int64) c.sqlite3_int64 {
    const impl = sqlite3_api.*.hard_heap_limit64.?;
    return impl(N);
}

pub export fn sqlite3_uri_key(zFilename: [*c]const u8, N: c_int) [*c]const u8 {
    const impl = sqlite3_api.*.uri_key.?;
    return impl(zFilename, N);
}

pub export fn sqlite3_filename_database(zFilename: [*c]const u8) [*c]const u8 {
    const impl = sqlite3_api.*.filename_database.?;
    return impl(zFilename);
}

pub export fn sqlite3_filename_journal(zFilename: [*c]const u8) [*c]const u8 {
    const impl = sqlite3_api.*.filename_journal.?;
    return impl(zFilename);
}

pub export fn sqlite3_filename_wal(zFilename: [*c]const u8) [*c]const u8 {
    const impl = sqlite3_api.*.filename_wal.?;
    return impl(zFilename);
}

pub export fn sqlite3_create_filename(zDatabase: [*c]const u8, zJournal: [*c]const u8, zWal: [*c]const u8, nParam: c_int, azParam: [*c][*c]const u8) [*c]u8 {
    const impl = sqlite3_api.*.create_filename.?;
    return impl(zDatabase, zJournal, zWal, nParam, azParam);
}

pub export fn sqlite3_free_filename(p: [*c]u8) void {
    const impl = sqlite3_api.*.free_filename.?;
    impl(p);
}

pub export fn sqlite3_database_file_object(zName: [*c]const u8) [*c]c.sqlite3_file {
    const impl = sqlite3_api.*.database_file_object.?;
    return impl(zName);
}

pub export fn sqlite3_txn_state(db: ?*c.sqlite3, zSchema: [*c]const u8) c_int {
    const impl = sqlite3_api.*.txn_state.?;
    return impl(db, zSchema);
}
|
||||
// 64-bit change counters, autovacuum/vtab-IN helpers, and (de)serialization
// shims forwarded through the `sqlite3_api` routine table.

pub export fn sqlite3_changes64(db: ?*c.sqlite3) c.sqlite3_int64 {
    const impl = sqlite3_api.*.changes64.?;
    return impl(db);
}

pub export fn sqlite3_total_changes64(db: ?*c.sqlite3) c.sqlite3_int64 {
    const impl = sqlite3_api.*.total_changes64.?;
    return impl(db);
}

pub export fn sqlite3_autovacuum_pages(db: ?*c.sqlite3, xCallback: ?*const fn (?*anyopaque, [*c]const u8, c_uint, c_uint, c_uint) callconv(.C) c_uint, pArg: ?*anyopaque, xDestructor: ?*const fn (?*anyopaque) callconv(.C) void) c_int {
    const impl = sqlite3_api.*.autovacuum_pages.?;
    return impl(db, xCallback, pArg, xDestructor);
}

pub export fn sqlite3_error_offset(db: ?*c.sqlite3) c_int {
    const impl = sqlite3_api.*.error_offset.?;
    return impl(db);
}

pub export fn sqlite3_vtab_rhs_value(pIdxInfo: [*c]c.sqlite3_index_info, iCons: c_int, ppVal: [*c]?*c.sqlite3_value) c_int {
    const impl = sqlite3_api.*.vtab_rhs_value.?;
    return impl(pIdxInfo, iCons, ppVal);
}

pub export fn sqlite3_vtab_distinct(pIdxInfo: [*c]c.sqlite3_index_info) c_int {
    const impl = sqlite3_api.*.vtab_distinct.?;
    return impl(pIdxInfo);
}

pub export fn sqlite3_vtab_in(pIdxInfo: [*c]c.sqlite3_index_info, iCons: c_int, bHandle: c_int) c_int {
    const impl = sqlite3_api.*.vtab_in.?;
    return impl(pIdxInfo, iCons, bHandle);
}

pub export fn sqlite3_vtab_in_first(pVal: ?*c.sqlite3_value, ppOut: [*c]?*c.sqlite3_value) c_int {
    const impl = sqlite3_api.*.vtab_in_first.?;
    return impl(pVal, ppOut);
}

pub export fn sqlite3_vtab_in_next(pVal: ?*c.sqlite3_value, ppOut: [*c]?*c.sqlite3_value) c_int {
    const impl = sqlite3_api.*.vtab_in_next.?;
    return impl(pVal, ppOut);
}

pub export fn sqlite3_deserialize(db: ?*c.sqlite3, zSchema: [*c]const u8, pData: [*c]u8, szDb: c.sqlite3_int64, szBuf: c.sqlite3_int64, mFlags: c_uint) c_int {
    const impl = sqlite3_api.*.deserialize.?;
    return impl(db, zSchema, pData, szDb, szBuf, mFlags);
}

pub export fn sqlite3_serialize(db: ?*c.sqlite3, zSchema: [*c]const u8, piSize: [*c]c.sqlite3_int64, mFlags: c_uint) [*c]u8 {
    const impl = sqlite3_api.*.serialize.?;
    return impl(db, zSchema, piSize, mFlags);
}

pub export fn sqlite3_db_name(db: ?*c.sqlite3, N: c_int) [*c]const u8 {
    const impl = sqlite3_api.*.db_name.?;
    return impl(db, N);
}
|
241687
src/.deps/zig-sqlite/c/sqlite3.c
Normal file
241687
src/.deps/zig-sqlite/c/sqlite3.c
Normal file
File diff suppressed because it is too large
Load Diff
12836
src/.deps/zig-sqlite/c/sqlite3.h
Normal file
12836
src/.deps/zig-sqlite/c/sqlite3.h
Normal file
File diff suppressed because it is too large
Load Diff
701
src/.deps/zig-sqlite/c/sqlite3ext.h
Normal file
701
src/.deps/zig-sqlite/c/sqlite3ext.h
Normal file
|
@ -0,0 +1,701 @@
|
|||
/*
|
||||
** 2006 June 7
|
||||
**
|
||||
** The author disclaims copyright to this source code. In place of
|
||||
** a legal notice, here is a blessing:
|
||||
**
|
||||
** May you do good and not evil.
|
||||
** May you find forgiveness for yourself and forgive others.
|
||||
** May you share freely, never taking more than you give.
|
||||
**
|
||||
*************************************************************************
|
||||
** This header file defines the SQLite interface for use by
|
||||
** shared libraries that want to be imported as extensions into
|
||||
** an SQLite instance. Shared libraries that intend to be loaded
|
||||
** as extensions by SQLite should #include this file instead of
|
||||
** sqlite3.h.
|
||||
*/
|
||||
#ifndef SQLITE3EXT_H
|
||||
#define SQLITE3EXT_H
|
||||
#include "sqlite3.h"
|
||||
|
||||
/*
|
||||
** The following structure holds pointers to all of the SQLite API
|
||||
** routines.
|
||||
**
|
||||
** WARNING: In order to maintain backwards compatibility, add new
|
||||
** interfaces to the end of this structure only. If you insert new
|
||||
** interfaces in the middle of this structure, then older different
|
||||
** versions of SQLite will not be able to load each other's shared
|
||||
** libraries!
|
||||
*/
|
||||
struct sqlite3_api_routines {
|
||||
void * (*aggregate_context)(sqlite3_context*,int nBytes);
|
||||
int (*aggregate_count)(sqlite3_context*);
|
||||
int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*));
|
||||
int (*bind_double)(sqlite3_stmt*,int,double);
|
||||
int (*bind_int)(sqlite3_stmt*,int,int);
|
||||
int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64);
|
||||
int (*bind_null)(sqlite3_stmt*,int);
|
||||
int (*bind_parameter_count)(sqlite3_stmt*);
|
||||
int (*bind_parameter_index)(sqlite3_stmt*,const char*zName);
|
||||
const char * (*bind_parameter_name)(sqlite3_stmt*,int);
|
||||
int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*));
|
||||
int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*));
|
||||
int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*);
|
||||
int (*busy_handler)(sqlite3*,int(*)(void*,int),void*);
|
||||
int (*busy_timeout)(sqlite3*,int ms);
|
||||
int (*changes)(sqlite3*);
|
||||
int (*close)(sqlite3*);
|
||||
int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*,
|
||||
int eTextRep,const char*));
|
||||
int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*,
|
||||
int eTextRep,const void*));
|
||||
const void * (*column_blob)(sqlite3_stmt*,int iCol);
|
||||
int (*column_bytes)(sqlite3_stmt*,int iCol);
|
||||
int (*column_bytes16)(sqlite3_stmt*,int iCol);
|
||||
int (*column_count)(sqlite3_stmt*pStmt);
|
||||
const char * (*column_database_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_database_name16)(sqlite3_stmt*,int);
|
||||
const char * (*column_decltype)(sqlite3_stmt*,int i);
|
||||
const void * (*column_decltype16)(sqlite3_stmt*,int);
|
||||
double (*column_double)(sqlite3_stmt*,int iCol);
|
||||
int (*column_int)(sqlite3_stmt*,int iCol);
|
||||
sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol);
|
||||
const char * (*column_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_name16)(sqlite3_stmt*,int);
|
||||
const char * (*column_origin_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_origin_name16)(sqlite3_stmt*,int);
|
||||
const char * (*column_table_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_table_name16)(sqlite3_stmt*,int);
|
||||
const unsigned char * (*column_text)(sqlite3_stmt*,int iCol);
|
||||
const void * (*column_text16)(sqlite3_stmt*,int iCol);
|
||||
int (*column_type)(sqlite3_stmt*,int iCol);
|
||||
sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol);
|
||||
void * (*commit_hook)(sqlite3*,int(*)(void*),void*);
|
||||
int (*complete)(const char*sql);
|
||||
int (*complete16)(const void*sql);
|
||||
int (*create_collation)(sqlite3*,const char*,int,void*,
|
||||
int(*)(void*,int,const void*,int,const void*));
|
||||
int (*create_collation16)(sqlite3*,const void*,int,void*,
|
||||
int(*)(void*,int,const void*,int,const void*));
|
||||
int (*create_function)(sqlite3*,const char*,int,int,void*,
|
||||
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*));
|
||||
int (*create_function16)(sqlite3*,const void*,int,int,void*,
|
||||
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*));
|
||||
int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*);
|
||||
int (*data_count)(sqlite3_stmt*pStmt);
|
||||
sqlite3 * (*db_handle)(sqlite3_stmt*);
|
||||
int (*declare_vtab)(sqlite3*,const char*);
|
||||
int (*enable_shared_cache)(int);
|
||||
int (*errcode)(sqlite3*db);
|
||||
const char * (*errmsg)(sqlite3*);
|
||||
const void * (*errmsg16)(sqlite3*);
|
||||
int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**);
|
||||
int (*expired)(sqlite3_stmt*);
|
||||
int (*finalize)(sqlite3_stmt*pStmt);
|
||||
void (*free)(void*);
|
||||
void (*free_table)(char**result);
|
||||
int (*get_autocommit)(sqlite3*);
|
||||
void * (*get_auxdata)(sqlite3_context*,int);
|
||||
int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**);
|
||||
int (*global_recover)(void);
|
||||
void (*interruptx)(sqlite3*);
|
||||
sqlite_int64 (*last_insert_rowid)(sqlite3*);
|
||||
const char * (*libversion)(void);
|
||||
int (*libversion_number)(void);
|
||||
void *(*malloc)(int);
|
||||
char * (*mprintf)(const char*,...);
|
||||
int (*open)(const char*,sqlite3**);
|
||||
int (*open16)(const void*,sqlite3**);
|
||||
int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
|
||||
int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
|
||||
void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*);
|
||||
void (*progress_handler)(sqlite3*,int,int(*)(void*),void*);
|
||||
void *(*realloc)(void*,int);
|
||||
int (*reset)(sqlite3_stmt*pStmt);
|
||||
void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_double)(sqlite3_context*,double);
|
||||
void (*result_error)(sqlite3_context*,const char*,int);
|
||||
void (*result_error16)(sqlite3_context*,const void*,int);
|
||||
void (*result_int)(sqlite3_context*,int);
|
||||
void (*result_int64)(sqlite3_context*,sqlite_int64);
|
||||
void (*result_null)(sqlite3_context*);
|
||||
void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*));
|
||||
void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_value)(sqlite3_context*,sqlite3_value*);
|
||||
void * (*rollback_hook)(sqlite3*,void(*)(void*),void*);
|
||||
int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*,
|
||||
const char*,const char*),void*);
|
||||
void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*));
|
||||
char * (*xsnprintf)(int,char*,const char*,...);
|
||||
int (*step)(sqlite3_stmt*);
|
||||
int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*,
|
||||
char const**,char const**,int*,int*,int*);
|
||||
void (*thread_cleanup)(void);
|
||||
int (*total_changes)(sqlite3*);
|
||||
void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*);
|
||||
int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*);
|
||||
void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*,
|
||||
sqlite_int64),void*);
|
||||
void * (*user_data)(sqlite3_context*);
|
||||
const void * (*value_blob)(sqlite3_value*);
|
||||
int (*value_bytes)(sqlite3_value*);
|
||||
int (*value_bytes16)(sqlite3_value*);
|
||||
double (*value_double)(sqlite3_value*);
|
||||
int (*value_int)(sqlite3_value*);
|
||||
sqlite_int64 (*value_int64)(sqlite3_value*);
|
||||
int (*value_numeric_type)(sqlite3_value*);
|
||||
const unsigned char * (*value_text)(sqlite3_value*);
|
||||
const void * (*value_text16)(sqlite3_value*);
|
||||
const void * (*value_text16be)(sqlite3_value*);
|
||||
const void * (*value_text16le)(sqlite3_value*);
|
||||
int (*value_type)(sqlite3_value*);
|
||||
char *(*vmprintf)(const char*,va_list);
|
||||
/* Added ??? */
|
||||
int (*overload_function)(sqlite3*, const char *zFuncName, int nArg);
|
||||
/* Added by 3.3.13 */
|
||||
int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
|
||||
int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
|
||||
int (*clear_bindings)(sqlite3_stmt*);
|
||||
/* Added by 3.4.1 */
|
||||
int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*,
|
||||
void (*xDestroy)(void *));
|
||||
/* Added by 3.5.0 */
|
||||
int (*bind_zeroblob)(sqlite3_stmt*,int,int);
|
||||
int (*blob_bytes)(sqlite3_blob*);
|
||||
int (*blob_close)(sqlite3_blob*);
|
||||
int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64,
|
||||
int,sqlite3_blob**);
|
||||
int (*blob_read)(sqlite3_blob*,void*,int,int);
|
||||
int (*blob_write)(sqlite3_blob*,const void*,int,int);
|
||||
int (*create_collation_v2)(sqlite3*,const char*,int,void*,
|
||||
int(*)(void*,int,const void*,int,const void*),
|
||||
void(*)(void*));
|
||||
int (*file_control)(sqlite3*,const char*,int,void*);
|
||||
sqlite3_int64 (*memory_highwater)(int);
|
||||
sqlite3_int64 (*memory_used)(void);
|
||||
sqlite3_mutex *(*mutex_alloc)(int);
|
||||
void (*mutex_enter)(sqlite3_mutex*);
|
||||
void (*mutex_free)(sqlite3_mutex*);
|
||||
void (*mutex_leave)(sqlite3_mutex*);
|
||||
int (*mutex_try)(sqlite3_mutex*);
|
||||
int (*open_v2)(const char*,sqlite3**,int,const char*);
|
||||
int (*release_memory)(int);
|
||||
void (*result_error_nomem)(sqlite3_context*);
|
||||
void (*result_error_toobig)(sqlite3_context*);
|
||||
int (*sleep)(int);
|
||||
void (*soft_heap_limit)(int);
|
||||
sqlite3_vfs *(*vfs_find)(const char*);
|
||||
int (*vfs_register)(sqlite3_vfs*,int);
|
||||
int (*vfs_unregister)(sqlite3_vfs*);
|
||||
int (*xthreadsafe)(void);
|
||||
void (*result_zeroblob)(sqlite3_context*,int);
|
||||
void (*result_error_code)(sqlite3_context*,int);
|
||||
int (*test_control)(int, ...);
|
||||
void (*randomness)(int,void*);
|
||||
sqlite3 *(*context_db_handle)(sqlite3_context*);
|
||||
int (*extended_result_codes)(sqlite3*,int);
|
||||
int (*limit)(sqlite3*,int,int);
|
||||
sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*);
|
||||
const char *(*sql)(sqlite3_stmt*);
|
||||
int (*status)(int,int*,int*,int);
|
||||
int (*backup_finish)(sqlite3_backup*);
|
||||
sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*);
|
||||
int (*backup_pagecount)(sqlite3_backup*);
|
||||
int (*backup_remaining)(sqlite3_backup*);
|
||||
int (*backup_step)(sqlite3_backup*,int);
|
||||
const char *(*compileoption_get)(int);
|
||||
int (*compileoption_used)(const char*);
|
||||
int (*create_function_v2)(sqlite3*,const char*,int,int,void*,
|
||||
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*),
|
||||
void(*xDestroy)(void*));
|
||||
int (*db_config)(sqlite3*,int,...);
|
||||
sqlite3_mutex *(*db_mutex)(sqlite3*);
|
||||
int (*db_status)(sqlite3*,int,int*,int*,int);
|
||||
int (*extended_errcode)(sqlite3*);
|
||||
void (*log)(int,const char*,...);
|
||||
sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64);
|
||||
const char *(*sourceid)(void);
|
||||
int (*stmt_status)(sqlite3_stmt*,int,int);
|
||||
int (*strnicmp)(const char*,const char*,int);
|
||||
int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*);
|
||||
int (*wal_autocheckpoint)(sqlite3*,int);
|
||||
int (*wal_checkpoint)(sqlite3*,const char*);
|
||||
void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*);
|
||||
int (*blob_reopen)(sqlite3_blob*,sqlite3_int64);
|
||||
int (*vtab_config)(sqlite3*,int op,...);
|
||||
int (*vtab_on_conflict)(sqlite3*);
|
||||
/* Version 3.7.16 and later */
|
||||
int (*close_v2)(sqlite3*);
|
||||
const char *(*db_filename)(sqlite3*,const char*);
|
||||
int (*db_readonly)(sqlite3*,const char*);
|
||||
int (*db_release_memory)(sqlite3*);
|
||||
const char *(*errstr)(int);
|
||||
int (*stmt_busy)(sqlite3_stmt*);
|
||||
int (*stmt_readonly)(sqlite3_stmt*);
|
||||
int (*stricmp)(const char*,const char*);
|
||||
int (*uri_boolean)(const char*,const char*,int);
|
||||
sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64);
|
||||
const char *(*uri_parameter)(const char*,const char*);
|
||||
char *(*xvsnprintf)(int,char*,const char*,va_list);
|
||||
int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*);
|
||||
/* Version 3.8.7 and later */
|
||||
int (*auto_extension)(void(*)(void));
|
||||
int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64,
|
||||
void(*)(void*));
|
||||
int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64,
|
||||
void(*)(void*),unsigned char);
|
||||
int (*cancel_auto_extension)(void(*)(void));
|
||||
int (*load_extension)(sqlite3*,const char*,const char*,char**);
|
||||
void *(*malloc64)(sqlite3_uint64);
|
||||
sqlite3_uint64 (*msize)(void*);
|
||||
void *(*realloc64)(void*,sqlite3_uint64);
|
||||
void (*reset_auto_extension)(void);
|
||||
void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64,
|
||||
void(*)(void*));
|
||||
void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64,
|
||||
void(*)(void*), unsigned char);
|
||||
int (*strglob)(const char*,const char*);
|
||||
/* Version 3.8.11 and later */
|
||||
sqlite3_value *(*value_dup)(const sqlite3_value*);
|
||||
void (*value_free)(sqlite3_value*);
|
||||
int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64);
|
||||
int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64);
|
||||
/* Version 3.9.0 and later */
|
||||
unsigned int (*value_subtype)(sqlite3_value*);
|
||||
void (*result_subtype)(sqlite3_context*,unsigned int);
|
||||
/* Version 3.10.0 and later */
|
||||
int (*status64)(int,sqlite3_int64*,sqlite3_int64*,int);
|
||||
int (*strlike)(const char*,const char*,unsigned int);
|
||||
int (*db_cacheflush)(sqlite3*);
|
||||
/* Version 3.12.0 and later */
|
||||
int (*system_errno)(sqlite3*);
|
||||
/* Version 3.14.0 and later */
|
||||
int (*trace_v2)(sqlite3*,unsigned,int(*)(unsigned,void*,void*,void*),void*);
|
||||
char *(*expanded_sql)(sqlite3_stmt*);
|
||||
/* Version 3.18.0 and later */
|
||||
void (*set_last_insert_rowid)(sqlite3*,sqlite3_int64);
|
||||
/* Version 3.20.0 and later */
|
||||
int (*prepare_v3)(sqlite3*,const char*,int,unsigned int,
|
||||
sqlite3_stmt**,const char**);
|
||||
int (*prepare16_v3)(sqlite3*,const void*,int,unsigned int,
|
||||
sqlite3_stmt**,const void**);
|
||||
int (*bind_pointer)(sqlite3_stmt*,int,void*,const char*,void(*)(void*));
|
||||
void (*result_pointer)(sqlite3_context*,void*,const char*,void(*)(void*));
|
||||
void *(*value_pointer)(sqlite3_value*,const char*);
|
||||
int (*vtab_nochange)(sqlite3_context*);
|
||||
int (*value_nochange)(sqlite3_value*);
|
||||
const char *(*vtab_collation)(sqlite3_index_info*,int);
|
||||
/* Version 3.24.0 and later */
|
||||
int (*keyword_count)(void);
|
||||
int (*keyword_name)(int,const char**,int*);
|
||||
int (*keyword_check)(const char*,int);
|
||||
sqlite3_str *(*str_new)(sqlite3*);
|
||||
char *(*str_finish)(sqlite3_str*);
|
||||
void (*str_appendf)(sqlite3_str*, const char *zFormat, ...);
|
||||
void (*str_vappendf)(sqlite3_str*, const char *zFormat, va_list);
|
||||
void (*str_append)(sqlite3_str*, const char *zIn, int N);
|
||||
void (*str_appendall)(sqlite3_str*, const char *zIn);
|
||||
void (*str_appendchar)(sqlite3_str*, int N, char C);
|
||||
void (*str_reset)(sqlite3_str*);
|
||||
int (*str_errcode)(sqlite3_str*);
|
||||
int (*str_length)(sqlite3_str*);
|
||||
char *(*str_value)(sqlite3_str*);
|
||||
/* Version 3.25.0 and later */
|
||||
int (*create_window_function)(sqlite3*,const char*,int,int,void*,
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*),
|
||||
void (*xValue)(sqlite3_context*),
|
||||
void (*xInv)(sqlite3_context*,int,sqlite3_value**),
|
||||
void(*xDestroy)(void*));
|
||||
/* Version 3.26.0 and later */
|
||||
const char *(*normalized_sql)(sqlite3_stmt*);
|
||||
/* Version 3.28.0 and later */
|
||||
int (*stmt_isexplain)(sqlite3_stmt*);
|
||||
int (*value_frombind)(sqlite3_value*);
|
||||
/* Version 3.30.0 and later */
|
||||
int (*drop_modules)(sqlite3*,const char**);
|
||||
/* Version 3.31.0 and later */
|
||||
sqlite3_int64 (*hard_heap_limit64)(sqlite3_int64);
|
||||
const char *(*uri_key)(const char*,int);
|
||||
const char *(*filename_database)(const char*);
|
||||
const char *(*filename_journal)(const char*);
|
||||
const char *(*filename_wal)(const char*);
|
||||
/* Version 3.32.0 and later */
|
||||
char *(*create_filename)(const char*,const char*,const char*,
|
||||
int,const char**);
|
||||
void (*free_filename)(char*);
|
||||
sqlite3_file *(*database_file_object)(const char*);
|
||||
/* Version 3.34.0 and later */
|
||||
int (*txn_state)(sqlite3*,const char*);
|
||||
/* Version 3.36.1 and later */
|
||||
sqlite3_int64 (*changes64)(sqlite3*);
|
||||
sqlite3_int64 (*total_changes64)(sqlite3*);
|
||||
/* Version 3.37.0 and later */
|
||||
int (*autovacuum_pages)(sqlite3*,
|
||||
unsigned int(*)(void*,const char*,unsigned int,unsigned int,unsigned int),
|
||||
void*, void(*)(void*));
|
||||
/* Version 3.38.0 and later */
|
||||
int (*error_offset)(sqlite3*);
|
||||
int (*vtab_rhs_value)(sqlite3_index_info*,int,sqlite3_value**);
|
||||
int (*vtab_distinct)(sqlite3_index_info*);
|
||||
int (*vtab_in)(sqlite3_index_info*,int,int);
|
||||
int (*vtab_in_first)(sqlite3_value*,sqlite3_value**);
|
||||
int (*vtab_in_next)(sqlite3_value*,sqlite3_value**);
|
||||
/* Version 3.39.0 and later */
|
||||
int (*deserialize)(sqlite3*,const char*,unsigned char*,
|
||||
sqlite3_int64,sqlite3_int64,unsigned);
|
||||
unsigned char *(*serialize)(sqlite3*,const char *,sqlite3_int64*,
|
||||
unsigned int);
|
||||
const char *(*db_name)(sqlite3*,int);
|
||||
};
|
||||
|
||||
/*
|
||||
** This is the function signature used for all extension entry points. It
|
||||
** is also defined in the file "loadext.c".
|
||||
*/
|
||||
typedef int (*sqlite3_loadext_entry)(
|
||||
sqlite3 *db, /* Handle to the database. */
|
||||
char **pzErrMsg, /* Used to set error string on failure. */
|
||||
const sqlite3_api_routines *pThunk /* Extension API function pointers. */
|
||||
);
|
||||
|
||||
/*
|
||||
** The following macros redefine the API routines so that they are
|
||||
** redirected through the global sqlite3_api structure.
|
||||
**
|
||||
** This header file is also used by the loadext.c source file
|
||||
** (part of the main SQLite library - not an extension) so that
|
||||
** it can get access to the sqlite3_api_routines structure
|
||||
** definition. But the main library does not want to redefine
|
||||
** the API. So the redefinition macros are only valid if the
|
||||
** SQLITE_CORE macros is undefined.
|
||||
*/
|
||||
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
|
||||
#define sqlite3_aggregate_context sqlite3_api->aggregate_context
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#define sqlite3_aggregate_count sqlite3_api->aggregate_count
|
||||
#endif
|
||||
#define sqlite3_bind_blob sqlite3_api->bind_blob
|
||||
#define sqlite3_bind_double sqlite3_api->bind_double
|
||||
#define sqlite3_bind_int sqlite3_api->bind_int
|
||||
#define sqlite3_bind_int64 sqlite3_api->bind_int64
|
||||
#define sqlite3_bind_null sqlite3_api->bind_null
|
||||
#define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count
|
||||
#define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index
|
||||
#define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name
|
||||
#define sqlite3_bind_text sqlite3_api->bind_text
|
||||
#define sqlite3_bind_text16 sqlite3_api->bind_text16
|
||||
#define sqlite3_bind_value sqlite3_api->bind_value
|
||||
#define sqlite3_busy_handler sqlite3_api->busy_handler
|
||||
#define sqlite3_busy_timeout sqlite3_api->busy_timeout
|
||||
#define sqlite3_changes sqlite3_api->changes
|
||||
#define sqlite3_close sqlite3_api->close
|
||||
#define sqlite3_collation_needed sqlite3_api->collation_needed
|
||||
#define sqlite3_collation_needed16 sqlite3_api->collation_needed16
|
||||
#define sqlite3_column_blob sqlite3_api->column_blob
|
||||
#define sqlite3_column_bytes sqlite3_api->column_bytes
|
||||
#define sqlite3_column_bytes16 sqlite3_api->column_bytes16
|
||||
#define sqlite3_column_count sqlite3_api->column_count
|
||||
#define sqlite3_column_database_name sqlite3_api->column_database_name
|
||||
#define sqlite3_column_database_name16 sqlite3_api->column_database_name16
|
||||
#define sqlite3_column_decltype sqlite3_api->column_decltype
|
||||
#define sqlite3_column_decltype16 sqlite3_api->column_decltype16
|
||||
#define sqlite3_column_double sqlite3_api->column_double
|
||||
#define sqlite3_column_int sqlite3_api->column_int
|
||||
#define sqlite3_column_int64 sqlite3_api->column_int64
|
||||
#define sqlite3_column_name sqlite3_api->column_name
|
||||
#define sqlite3_column_name16 sqlite3_api->column_name16
|
||||
#define sqlite3_column_origin_name sqlite3_api->column_origin_name
|
||||
#define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16
|
||||
#define sqlite3_column_table_name sqlite3_api->column_table_name
|
||||
#define sqlite3_column_table_name16 sqlite3_api->column_table_name16
|
||||
#define sqlite3_column_text sqlite3_api->column_text
|
||||
#define sqlite3_column_text16 sqlite3_api->column_text16
|
||||
#define sqlite3_column_type sqlite3_api->column_type
|
||||
#define sqlite3_column_value sqlite3_api->column_value
|
||||
#define sqlite3_commit_hook sqlite3_api->commit_hook
|
||||
#define sqlite3_complete sqlite3_api->complete
|
||||
#define sqlite3_complete16 sqlite3_api->complete16
|
||||
#define sqlite3_create_collation sqlite3_api->create_collation
|
||||
#define sqlite3_create_collation16 sqlite3_api->create_collation16
|
||||
#define sqlite3_create_function sqlite3_api->create_function
|
||||
#define sqlite3_create_function16 sqlite3_api->create_function16
|
||||
#define sqlite3_create_module sqlite3_api->create_module
|
||||
#define sqlite3_create_module_v2 sqlite3_api->create_module_v2
|
||||
#define sqlite3_data_count sqlite3_api->data_count
|
||||
#define sqlite3_db_handle sqlite3_api->db_handle
|
||||
#define sqlite3_declare_vtab sqlite3_api->declare_vtab
|
||||
#define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache
|
||||
#define sqlite3_errcode sqlite3_api->errcode
|
||||
#define sqlite3_errmsg sqlite3_api->errmsg
|
||||
#define sqlite3_errmsg16 sqlite3_api->errmsg16
|
||||
#define sqlite3_exec sqlite3_api->exec
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#define sqlite3_expired sqlite3_api->expired
|
||||
#endif
|
||||
#define sqlite3_finalize sqlite3_api->finalize
|
||||
#define sqlite3_free sqlite3_api->free
|
||||
#define sqlite3_free_table sqlite3_api->free_table
|
||||
#define sqlite3_get_autocommit sqlite3_api->get_autocommit
|
||||
#define sqlite3_get_auxdata sqlite3_api->get_auxdata
|
||||
#define sqlite3_get_table sqlite3_api->get_table
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#define sqlite3_global_recover sqlite3_api->global_recover
|
||||
#endif
|
||||
#define sqlite3_interrupt sqlite3_api->interruptx
|
||||
#define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid
|
||||
#define sqlite3_libversion sqlite3_api->libversion
|
||||
#define sqlite3_libversion_number sqlite3_api->libversion_number
|
||||
#define sqlite3_malloc sqlite3_api->malloc
|
||||
#define sqlite3_mprintf sqlite3_api->mprintf
|
||||
#define sqlite3_open sqlite3_api->open
|
||||
#define sqlite3_open16 sqlite3_api->open16
|
||||
#define sqlite3_prepare sqlite3_api->prepare
|
||||
#define sqlite3_prepare16 sqlite3_api->prepare16
|
||||
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
|
||||
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
|
||||
#define sqlite3_profile sqlite3_api->profile
|
||||
#define sqlite3_progress_handler sqlite3_api->progress_handler
|
||||
#define sqlite3_realloc sqlite3_api->realloc
|
||||
#define sqlite3_reset sqlite3_api->reset
|
||||
#define sqlite3_result_blob sqlite3_api->result_blob
|
||||
#define sqlite3_result_double sqlite3_api->result_double
|
||||
#define sqlite3_result_error sqlite3_api->result_error
|
||||
#define sqlite3_result_error16 sqlite3_api->result_error16
|
||||
#define sqlite3_result_int sqlite3_api->result_int
|
||||
#define sqlite3_result_int64 sqlite3_api->result_int64
|
||||
#define sqlite3_result_null sqlite3_api->result_null
|
||||
#define sqlite3_result_text sqlite3_api->result_text
|
||||
#define sqlite3_result_text16 sqlite3_api->result_text16
|
||||
#define sqlite3_result_text16be sqlite3_api->result_text16be
|
||||
#define sqlite3_result_text16le sqlite3_api->result_text16le
|
||||
#define sqlite3_result_value sqlite3_api->result_value
|
||||
#define sqlite3_rollback_hook sqlite3_api->rollback_hook
|
||||
#define sqlite3_set_authorizer sqlite3_api->set_authorizer
|
||||
#define sqlite3_set_auxdata sqlite3_api->set_auxdata
|
||||
#define sqlite3_snprintf sqlite3_api->xsnprintf
|
||||
#define sqlite3_step sqlite3_api->step
|
||||
#define sqlite3_table_column_metadata sqlite3_api->table_column_metadata
|
||||
#define sqlite3_thread_cleanup sqlite3_api->thread_cleanup
|
||||
#define sqlite3_total_changes sqlite3_api->total_changes
|
||||
#define sqlite3_trace sqlite3_api->trace
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#define sqlite3_transfer_bindings sqlite3_api->transfer_bindings
|
||||
#endif
|
||||
#define sqlite3_update_hook sqlite3_api->update_hook
|
||||
#define sqlite3_user_data sqlite3_api->user_data
|
||||
#define sqlite3_value_blob sqlite3_api->value_blob
|
||||
#define sqlite3_value_bytes sqlite3_api->value_bytes
|
||||
#define sqlite3_value_bytes16 sqlite3_api->value_bytes16
|
||||
#define sqlite3_value_double sqlite3_api->value_double
|
||||
#define sqlite3_value_int sqlite3_api->value_int
|
||||
#define sqlite3_value_int64 sqlite3_api->value_int64
|
||||
#define sqlite3_value_numeric_type sqlite3_api->value_numeric_type
|
||||
#define sqlite3_value_text sqlite3_api->value_text
|
||||
#define sqlite3_value_text16 sqlite3_api->value_text16
|
||||
#define sqlite3_value_text16be sqlite3_api->value_text16be
|
||||
#define sqlite3_value_text16le sqlite3_api->value_text16le
|
||||
#define sqlite3_value_type sqlite3_api->value_type
|
||||
#define sqlite3_vmprintf sqlite3_api->vmprintf
|
||||
#define sqlite3_vsnprintf sqlite3_api->xvsnprintf
|
||||
#define sqlite3_overload_function sqlite3_api->overload_function
|
||||
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
|
||||
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
|
||||
#define sqlite3_clear_bindings sqlite3_api->clear_bindings
|
||||
#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob
|
||||
#define sqlite3_blob_bytes sqlite3_api->blob_bytes
|
||||
#define sqlite3_blob_close sqlite3_api->blob_close
|
||||
#define sqlite3_blob_open sqlite3_api->blob_open
|
||||
#define sqlite3_blob_read sqlite3_api->blob_read
|
||||
#define sqlite3_blob_write sqlite3_api->blob_write
|
||||
#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2
|
||||
#define sqlite3_file_control sqlite3_api->file_control
|
||||
#define sqlite3_memory_highwater sqlite3_api->memory_highwater
|
||||
#define sqlite3_memory_used sqlite3_api->memory_used
|
||||
#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc
|
||||
#define sqlite3_mutex_enter sqlite3_api->mutex_enter
|
||||
#define sqlite3_mutex_free sqlite3_api->mutex_free
|
||||
#define sqlite3_mutex_leave sqlite3_api->mutex_leave
|
||||
#define sqlite3_mutex_try sqlite3_api->mutex_try
|
||||
#define sqlite3_open_v2 sqlite3_api->open_v2
|
||||
#define sqlite3_release_memory sqlite3_api->release_memory
|
||||
#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem
|
||||
#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig
|
||||
#define sqlite3_sleep sqlite3_api->sleep
|
||||
#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit
|
||||
#define sqlite3_vfs_find sqlite3_api->vfs_find
|
||||
#define sqlite3_vfs_register sqlite3_api->vfs_register
|
||||
#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister
|
||||
#define sqlite3_threadsafe sqlite3_api->xthreadsafe
|
||||
#define sqlite3_result_zeroblob sqlite3_api->result_zeroblob
|
||||
#define sqlite3_result_error_code sqlite3_api->result_error_code
|
||||
#define sqlite3_test_control sqlite3_api->test_control
|
||||
#define sqlite3_randomness sqlite3_api->randomness
|
||||
#define sqlite3_context_db_handle sqlite3_api->context_db_handle
|
||||
#define sqlite3_extended_result_codes sqlite3_api->extended_result_codes
|
||||
#define sqlite3_limit sqlite3_api->limit
|
||||
#define sqlite3_next_stmt sqlite3_api->next_stmt
|
||||
#define sqlite3_sql sqlite3_api->sql
|
||||
#define sqlite3_status sqlite3_api->status
|
||||
#define sqlite3_backup_finish sqlite3_api->backup_finish
|
||||
#define sqlite3_backup_init sqlite3_api->backup_init
|
||||
#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount
|
||||
#define sqlite3_backup_remaining sqlite3_api->backup_remaining
|
||||
#define sqlite3_backup_step sqlite3_api->backup_step
|
||||
#define sqlite3_compileoption_get sqlite3_api->compileoption_get
|
||||
#define sqlite3_compileoption_used sqlite3_api->compileoption_used
|
||||
#define sqlite3_create_function_v2 sqlite3_api->create_function_v2
|
||||
#define sqlite3_db_config sqlite3_api->db_config
|
||||
#define sqlite3_db_mutex sqlite3_api->db_mutex
|
||||
#define sqlite3_db_status sqlite3_api->db_status
|
||||
#define sqlite3_extended_errcode sqlite3_api->extended_errcode
|
||||
#define sqlite3_log sqlite3_api->log
|
||||
#define sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64
|
||||
#define sqlite3_sourceid sqlite3_api->sourceid
|
||||
#define sqlite3_stmt_status sqlite3_api->stmt_status
|
||||
#define sqlite3_strnicmp sqlite3_api->strnicmp
|
||||
#define sqlite3_unlock_notify sqlite3_api->unlock_notify
|
||||
#define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint
|
||||
#define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint
|
||||
#define sqlite3_wal_hook sqlite3_api->wal_hook
|
||||
#define sqlite3_blob_reopen sqlite3_api->blob_reopen
|
||||
#define sqlite3_vtab_config sqlite3_api->vtab_config
|
||||
#define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict
|
||||
/* Version 3.7.16 and later */
|
||||
#define sqlite3_close_v2 sqlite3_api->close_v2
|
||||
#define sqlite3_db_filename sqlite3_api->db_filename
|
||||
#define sqlite3_db_readonly sqlite3_api->db_readonly
|
||||
#define sqlite3_db_release_memory sqlite3_api->db_release_memory
|
||||
#define sqlite3_errstr sqlite3_api->errstr
|
||||
#define sqlite3_stmt_busy sqlite3_api->stmt_busy
|
||||
#define sqlite3_stmt_readonly sqlite3_api->stmt_readonly
|
||||
#define sqlite3_stricmp sqlite3_api->stricmp
|
||||
#define sqlite3_uri_boolean sqlite3_api->uri_boolean
|
||||
#define sqlite3_uri_int64 sqlite3_api->uri_int64
|
||||
#define sqlite3_uri_parameter sqlite3_api->uri_parameter
|
||||
#define sqlite3_uri_vsnprintf sqlite3_api->xvsnprintf
|
||||
#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2
|
||||
/* Version 3.8.7 and later */
|
||||
#define sqlite3_auto_extension sqlite3_api->auto_extension
|
||||
#define sqlite3_bind_blob64 sqlite3_api->bind_blob64
|
||||
#define sqlite3_bind_text64 sqlite3_api->bind_text64
|
||||
#define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension
|
||||
#define sqlite3_load_extension sqlite3_api->load_extension
|
||||
#define sqlite3_malloc64 sqlite3_api->malloc64
|
||||
#define sqlite3_msize sqlite3_api->msize
|
||||
#define sqlite3_realloc64 sqlite3_api->realloc64
|
||||
#define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension
|
||||
#define sqlite3_result_blob64 sqlite3_api->result_blob64
|
||||
#define sqlite3_result_text64 sqlite3_api->result_text64
|
||||
#define sqlite3_strglob sqlite3_api->strglob
|
||||
/* Version 3.8.11 and later */
|
||||
#define sqlite3_value_dup sqlite3_api->value_dup
|
||||
#define sqlite3_value_free sqlite3_api->value_free
|
||||
#define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64
|
||||
#define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64
|
||||
/* Version 3.9.0 and later */
|
||||
#define sqlite3_value_subtype sqlite3_api->value_subtype
|
||||
#define sqlite3_result_subtype sqlite3_api->result_subtype
|
||||
/* Version 3.10.0 and later */
|
||||
#define sqlite3_status64 sqlite3_api->status64
|
||||
#define sqlite3_strlike sqlite3_api->strlike
|
||||
#define sqlite3_db_cacheflush sqlite3_api->db_cacheflush
|
||||
/* Version 3.12.0 and later */
|
||||
#define sqlite3_system_errno sqlite3_api->system_errno
|
||||
/* Version 3.14.0 and later */
|
||||
#define sqlite3_trace_v2 sqlite3_api->trace_v2
|
||||
#define sqlite3_expanded_sql sqlite3_api->expanded_sql
|
||||
/* Version 3.18.0 and later */
|
||||
#define sqlite3_set_last_insert_rowid sqlite3_api->set_last_insert_rowid
|
||||
/* Version 3.20.0 and later */
|
||||
#define sqlite3_prepare_v3 sqlite3_api->prepare_v3
|
||||
#define sqlite3_prepare16_v3 sqlite3_api->prepare16_v3
|
||||
#define sqlite3_bind_pointer sqlite3_api->bind_pointer
|
||||
#define sqlite3_result_pointer sqlite3_api->result_pointer
|
||||
#define sqlite3_value_pointer sqlite3_api->value_pointer
|
||||
/* Version 3.22.0 and later */
|
||||
#define sqlite3_vtab_nochange sqlite3_api->vtab_nochange
|
||||
#define sqlite3_value_nochange sqlite3_api->value_nochange
|
||||
#define sqlite3_vtab_collation sqlite3_api->vtab_collation
|
||||
/* Version 3.24.0 and later */
|
||||
#define sqlite3_keyword_count sqlite3_api->keyword_count
|
||||
#define sqlite3_keyword_name sqlite3_api->keyword_name
|
||||
#define sqlite3_keyword_check sqlite3_api->keyword_check
|
||||
#define sqlite3_str_new sqlite3_api->str_new
|
||||
#define sqlite3_str_finish sqlite3_api->str_finish
|
||||
#define sqlite3_str_appendf sqlite3_api->str_appendf
|
||||
#define sqlite3_str_vappendf sqlite3_api->str_vappendf
|
||||
#define sqlite3_str_append sqlite3_api->str_append
|
||||
#define sqlite3_str_appendall sqlite3_api->str_appendall
|
||||
#define sqlite3_str_appendchar sqlite3_api->str_appendchar
|
||||
#define sqlite3_str_reset sqlite3_api->str_reset
|
||||
#define sqlite3_str_errcode sqlite3_api->str_errcode
|
||||
#define sqlite3_str_length sqlite3_api->str_length
|
||||
#define sqlite3_str_value sqlite3_api->str_value
|
||||
/* Version 3.25.0 and later */
|
||||
#define sqlite3_create_window_function sqlite3_api->create_window_function
|
||||
/* Version 3.26.0 and later */
|
||||
#define sqlite3_normalized_sql sqlite3_api->normalized_sql
|
||||
/* Version 3.28.0 and later */
|
||||
#define sqlite3_stmt_isexplain sqlite3_api->stmt_isexplain
|
||||
#define sqlite3_value_frombind sqlite3_api->value_frombind
|
||||
/* Version 3.30.0 and later */
|
||||
#define sqlite3_drop_modules sqlite3_api->drop_modules
|
||||
/* Version 3.31.0 and later */
|
||||
#define sqlite3_hard_heap_limit64 sqlite3_api->hard_heap_limit64
|
||||
#define sqlite3_uri_key sqlite3_api->uri_key
|
||||
#define sqlite3_filename_database sqlite3_api->filename_database
|
||||
#define sqlite3_filename_journal sqlite3_api->filename_journal
|
||||
#define sqlite3_filename_wal sqlite3_api->filename_wal
|
||||
/* Version 3.32.0 and later */
|
||||
#define sqlite3_create_filename sqlite3_api->create_filename
|
||||
#define sqlite3_free_filename sqlite3_api->free_filename
|
||||
#define sqlite3_database_file_object sqlite3_api->database_file_object
|
||||
/* Version 3.34.0 and later */
|
||||
#define sqlite3_txn_state sqlite3_api->txn_state
|
||||
/* Version 3.36.1 and later */
|
||||
#define sqlite3_changes64 sqlite3_api->changes64
|
||||
#define sqlite3_total_changes64 sqlite3_api->total_changes64
|
||||
/* Version 3.37.0 and later */
|
||||
#define sqlite3_autovacuum_pages sqlite3_api->autovacuum_pages
|
||||
/* Version 3.38.0 and later */
|
||||
#define sqlite3_error_offset sqlite3_api->error_offset
|
||||
#define sqlite3_vtab_rhs_value sqlite3_api->vtab_rhs_value
|
||||
#define sqlite3_vtab_distinct sqlite3_api->vtab_distinct
|
||||
#define sqlite3_vtab_in sqlite3_api->vtab_in
|
||||
#define sqlite3_vtab_in_first sqlite3_api->vtab_in_first
|
||||
#define sqlite3_vtab_in_next sqlite3_api->vtab_in_next
|
||||
/* Version 3.39.0 and later */
|
||||
#ifndef SQLITE_OMIT_DESERIALIZE
|
||||
#define sqlite3_deserialize sqlite3_api->deserialize
|
||||
#define sqlite3_serialize sqlite3_api->serialize
|
||||
#endif
|
||||
#define sqlite3_db_name sqlite3_api->db_name
|
||||
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
|
||||
|
||||
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
|
||||
/* This case when the file really is being compiled as a loadable
|
||||
** extension */
|
||||
# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0;
|
||||
# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v;
|
||||
# define SQLITE_EXTENSION_INIT3 \
|
||||
extern const sqlite3_api_routines *sqlite3_api;
|
||||
#else
|
||||
/* This case when the file is being statically linked into the
|
||||
** application */
|
||||
# define SQLITE_EXTENSION_INIT1 /*no-op*/
|
||||
# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */
|
||||
# define SQLITE_EXTENSION_INIT3 /*no-op*/
|
||||
#endif
|
||||
|
||||
#endif /* SQLITE3EXT_H */
|
314
src/.deps/zig-sqlite/errors.zig
Normal file
314
src/.deps/zig-sqlite/errors.zig
Normal file
|
@ -0,0 +1,314 @@
|
|||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
|
||||
const c = @import("c.zig").c;
|
||||
const versionGreaterThanOrEqualTo = @import("c.zig").versionGreaterThanOrEqualTo;
|
||||
|
||||
pub const SQLiteExtendedIOError = error{
|
||||
SQLiteIOErrRead,
|
||||
SQLiteIOErrShortRead,
|
||||
SQLiteIOErrWrite,
|
||||
SQLiteIOErrFsync,
|
||||
SQLiteIOErrDirFsync,
|
||||
SQLiteIOErrTruncate,
|
||||
SQLiteIOErrFstat,
|
||||
SQLiteIOErrUnlock,
|
||||
SQLiteIOErrRDLock,
|
||||
SQLiteIOErrDelete,
|
||||
SQLiteIOErrBlocked,
|
||||
SQLiteIOErrNoMem,
|
||||
SQLiteIOErrAccess,
|
||||
SQLiteIOErrCheckReservedLock,
|
||||
SQLiteIOErrLock,
|
||||
SQLiteIOErrClose,
|
||||
SQLiteIOErrDirClose,
|
||||
SQLiteIOErrSHMOpen,
|
||||
SQLiteIOErrSHMSize,
|
||||
SQLiteIOErrSHMLock,
|
||||
SQLiteIOErrSHMMap,
|
||||
SQLiteIOErrSeek,
|
||||
SQLiteIOErrDeleteNoEnt,
|
||||
SQLiteIOErrMmap,
|
||||
SQLiteIOErrGetTempPath,
|
||||
SQLiteIOErrConvPath,
|
||||
SQLiteIOErrVnode,
|
||||
SQLiteIOErrAuth,
|
||||
SQLiteIOErrBeginAtomic,
|
||||
SQLiteIOErrCommitAtomic,
|
||||
SQLiteIOErrRollbackAtomic,
|
||||
SQLiteIOErrData,
|
||||
SQLiteIOErrCorruptFS,
|
||||
};
|
||||
|
||||
pub const SQLiteExtendedCantOpenError = error{
|
||||
SQLiteCantOpenNoTempDir,
|
||||
SQLiteCantOpenIsDir,
|
||||
SQLiteCantOpenFullPath,
|
||||
SQLiteCantOpenConvPath,
|
||||
SQLiteCantOpenDirtyWAL,
|
||||
SQLiteCantOpenSymlink,
|
||||
};
|
||||
|
||||
pub const SQLiteExtendedReadOnlyError = error{
|
||||
SQLiteReadOnlyRecovery,
|
||||
SQLiteReadOnlyCantLock,
|
||||
SQLiteReadOnlyRollback,
|
||||
SQLiteReadOnlyDBMoved,
|
||||
SQLiteReadOnlyCantInit,
|
||||
SQLiteReadOnlyDirectory,
|
||||
};
|
||||
|
||||
pub const SQLiteExtendedConstraintError = error{
|
||||
SQLiteConstraintCheck,
|
||||
SQLiteConstraintCommitHook,
|
||||
SQLiteConstraintForeignKey,
|
||||
SQLiteConstraintFunction,
|
||||
SQLiteConstraintNotNull,
|
||||
SQLiteConstraintPrimaryKey,
|
||||
SQLiteConstraintTrigger,
|
||||
SQLiteConstraintUnique,
|
||||
SQLiteConstraintVTab,
|
||||
SQLiteConstraintRowID,
|
||||
SQLiteConstraintPinned,
|
||||
};
|
||||
|
||||
pub const SQLiteExtendedError = error{
|
||||
SQLiteErrorMissingCollSeq,
|
||||
SQLiteErrorRetry,
|
||||
SQLiteErrorSnapshot,
|
||||
|
||||
SQLiteLockedSharedCache,
|
||||
SQLiteLockedVTab,
|
||||
|
||||
SQLiteBusyRecovery,
|
||||
SQLiteBusySnapshot,
|
||||
SQLiteBusyTimeout,
|
||||
|
||||
SQLiteCorruptVTab,
|
||||
SQLiteCorruptSequence,
|
||||
SQLiteCorruptIndex,
|
||||
|
||||
SQLiteAbortRollback,
|
||||
};
|
||||
|
||||
pub const SQLiteError = error{
|
||||
SQLiteError,
|
||||
SQLiteInternal,
|
||||
SQLitePerm,
|
||||
SQLiteAbort,
|
||||
SQLiteBusy,
|
||||
SQLiteLocked,
|
||||
SQLiteNoMem,
|
||||
SQLiteReadOnly,
|
||||
SQLiteInterrupt,
|
||||
SQLiteIOErr,
|
||||
SQLiteCorrupt,
|
||||
SQLiteNotFound,
|
||||
SQLiteFull,
|
||||
SQLiteCantOpen,
|
||||
SQLiteProtocol,
|
||||
SQLiteEmpty,
|
||||
SQLiteSchema,
|
||||
SQLiteTooBig,
|
||||
SQLiteConstraint,
|
||||
SQLiteMismatch,
|
||||
SQLiteMisuse,
|
||||
SQLiteNoLFS,
|
||||
SQLiteAuth,
|
||||
SQLiteRange,
|
||||
SQLiteNotADatabase,
|
||||
SQLiteNotice,
|
||||
SQLiteWarning,
|
||||
};
|
||||
|
||||
pub const Error = SQLiteError ||
|
||||
SQLiteExtendedError ||
|
||||
SQLiteExtendedIOError ||
|
||||
SQLiteExtendedCantOpenError ||
|
||||
SQLiteExtendedReadOnlyError ||
|
||||
SQLiteExtendedConstraintError;
|
||||
|
||||
pub fn errorFromResultCode(code: c_int) Error {
|
||||
// These errors are only available since 3.22.0.
|
||||
if (comptime versionGreaterThanOrEqualTo(3, 22, 0)) {
|
||||
switch (code) {
|
||||
c.SQLITE_ERROR_MISSING_COLLSEQ => return error.SQLiteErrorMissingCollSeq,
|
||||
c.SQLITE_ERROR_RETRY => return error.SQLiteErrorRetry,
|
||||
c.SQLITE_READONLY_CANTINIT => return error.SQLiteReadOnlyCantInit,
|
||||
c.SQLITE_READONLY_DIRECTORY => return error.SQLiteReadOnlyDirectory,
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
|
||||
// These errors are only available since 3.25.0.
|
||||
if (comptime versionGreaterThanOrEqualTo(3, 25, 0)) {
|
||||
switch (code) {
|
||||
c.SQLITE_ERROR_SNAPSHOT => return error.SQLiteErrorSnapshot,
|
||||
c.SQLITE_LOCKED_VTAB => return error.SQLiteLockedVTab,
|
||||
c.SQLITE_CANTOPEN_DIRTYWAL => return error.SQLiteCantOpenDirtyWAL,
|
||||
c.SQLITE_CORRUPT_SEQUENCE => return error.SQLiteCorruptSequence,
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
// These errors are only available since 3.31.0.
|
||||
if (comptime versionGreaterThanOrEqualTo(3, 31, 0)) {
|
||||
switch (code) {
|
||||
c.SQLITE_CANTOPEN_SYMLINK => return error.SQLiteCantOpenSymlink,
|
||||
c.SQLITE_CONSTRAINT_PINNED => return error.SQLiteConstraintPinned,
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
// These errors are only available since 3.32.0.
|
||||
if (comptime versionGreaterThanOrEqualTo(3, 32, 0)) {
|
||||
switch (code) {
|
||||
c.SQLITE_IOERR_DATA => return error.SQLiteIOErrData, // See https://sqlite.org/cksumvfs.html
|
||||
c.SQLITE_BUSY_TIMEOUT => return error.SQLiteBusyTimeout,
|
||||
c.SQLITE_CORRUPT_INDEX => return error.SQLiteCorruptIndex,
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
// These errors are only available since 3.34.0.
|
||||
if (comptime versionGreaterThanOrEqualTo(3, 34, 0)) {
|
||||
switch (code) {
|
||||
c.SQLITE_IOERR_CORRUPTFS => return error.SQLiteIOErrCorruptFS,
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
|
||||
switch (code) {
|
||||
c.SQLITE_ERROR => return error.SQLiteError,
|
||||
c.SQLITE_INTERNAL => return error.SQLiteInternal,
|
||||
c.SQLITE_PERM => return error.SQLitePerm,
|
||||
c.SQLITE_ABORT => return error.SQLiteAbort,
|
||||
c.SQLITE_BUSY => return error.SQLiteBusy,
|
||||
c.SQLITE_LOCKED => return error.SQLiteLocked,
|
||||
c.SQLITE_NOMEM => return error.SQLiteNoMem,
|
||||
c.SQLITE_READONLY => return error.SQLiteReadOnly,
|
||||
c.SQLITE_INTERRUPT => return error.SQLiteInterrupt,
|
||||
c.SQLITE_IOERR => return error.SQLiteIOErr,
|
||||
c.SQLITE_CORRUPT => return error.SQLiteCorrupt,
|
||||
c.SQLITE_NOTFOUND => return error.SQLiteNotFound,
|
||||
c.SQLITE_FULL => return error.SQLiteFull,
|
||||
c.SQLITE_CANTOPEN => return error.SQLiteCantOpen,
|
||||
c.SQLITE_PROTOCOL => return error.SQLiteProtocol,
|
||||
c.SQLITE_EMPTY => return error.SQLiteEmpty,
|
||||
c.SQLITE_SCHEMA => return error.SQLiteSchema,
|
||||
c.SQLITE_TOOBIG => return error.SQLiteTooBig,
|
||||
c.SQLITE_CONSTRAINT => return error.SQLiteConstraint,
|
||||
c.SQLITE_MISMATCH => return error.SQLiteMismatch,
|
||||
c.SQLITE_MISUSE => return error.SQLiteMisuse,
|
||||
c.SQLITE_NOLFS => return error.SQLiteNoLFS,
|
||||
c.SQLITE_AUTH => return error.SQLiteAuth,
|
||||
c.SQLITE_RANGE => return error.SQLiteRange,
|
||||
c.SQLITE_NOTADB => return error.SQLiteNotADatabase,
|
||||
c.SQLITE_NOTICE => return error.SQLiteNotice,
|
||||
c.SQLITE_WARNING => return error.SQLiteWarning,
|
||||
|
||||
c.SQLITE_IOERR_READ => return error.SQLiteIOErrRead,
|
||||
c.SQLITE_IOERR_SHORT_READ => return error.SQLiteIOErrShortRead,
|
||||
c.SQLITE_IOERR_WRITE => return error.SQLiteIOErrWrite,
|
||||
c.SQLITE_IOERR_FSYNC => return error.SQLiteIOErrFsync,
|
||||
c.SQLITE_IOERR_DIR_FSYNC => return error.SQLiteIOErrDirFsync,
|
||||
c.SQLITE_IOERR_TRUNCATE => return error.SQLiteIOErrTruncate,
|
||||
c.SQLITE_IOERR_FSTAT => return error.SQLiteIOErrFstat,
|
||||
c.SQLITE_IOERR_UNLOCK => return error.SQLiteIOErrUnlock,
|
||||
c.SQLITE_IOERR_RDLOCK => return error.SQLiteIOErrRDLock,
|
||||
c.SQLITE_IOERR_DELETE => return error.SQLiteIOErrDelete,
|
||||
c.SQLITE_IOERR_BLOCKED => return error.SQLiteIOErrBlocked,
|
||||
c.SQLITE_IOERR_NOMEM => return error.SQLiteIOErrNoMem,
|
||||
c.SQLITE_IOERR_ACCESS => return error.SQLiteIOErrAccess,
|
||||
c.SQLITE_IOERR_CHECKRESERVEDLOCK => return error.SQLiteIOErrCheckReservedLock,
|
||||
c.SQLITE_IOERR_LOCK => return error.SQLiteIOErrLock,
|
||||
c.SQLITE_IOERR_CLOSE => return error.SQLiteIOErrClose,
|
||||
c.SQLITE_IOERR_DIR_CLOSE => return error.SQLiteIOErrDirClose,
|
||||
c.SQLITE_IOERR_SHMOPEN => return error.SQLiteIOErrSHMOpen,
|
||||
c.SQLITE_IOERR_SHMSIZE => return error.SQLiteIOErrSHMSize,
|
||||
c.SQLITE_IOERR_SHMLOCK => return error.SQLiteIOErrSHMLock,
|
||||
c.SQLITE_IOERR_SHMMAP => return error.SQLiteIOErrSHMMap,
|
||||
c.SQLITE_IOERR_SEEK => return error.SQLiteIOErrSeek,
|
||||
c.SQLITE_IOERR_DELETE_NOENT => return error.SQLiteIOErrDeleteNoEnt,
|
||||
c.SQLITE_IOERR_MMAP => return error.SQLiteIOErrMmap,
|
||||
c.SQLITE_IOERR_GETTEMPPATH => return error.SQLiteIOErrGetTempPath,
|
||||
c.SQLITE_IOERR_CONVPATH => return error.SQLiteIOErrConvPath,
|
||||
c.SQLITE_IOERR_VNODE => return error.SQLiteIOErrVnode,
|
||||
c.SQLITE_IOERR_AUTH => return error.SQLiteIOErrAuth,
|
||||
c.SQLITE_IOERR_BEGIN_ATOMIC => return error.SQLiteIOErrBeginAtomic,
|
||||
c.SQLITE_IOERR_COMMIT_ATOMIC => return error.SQLiteIOErrCommitAtomic,
|
||||
c.SQLITE_IOERR_ROLLBACK_ATOMIC => return error.SQLiteIOErrRollbackAtomic,
|
||||
|
||||
c.SQLITE_LOCKED_SHAREDCACHE => return error.SQLiteLockedSharedCache,
|
||||
|
||||
c.SQLITE_BUSY_RECOVERY => return error.SQLiteBusyRecovery,
|
||||
c.SQLITE_BUSY_SNAPSHOT => return error.SQLiteBusySnapshot,
|
||||
|
||||
c.SQLITE_CANTOPEN_NOTEMPDIR => return error.SQLiteCantOpenNoTempDir,
|
||||
c.SQLITE_CANTOPEN_ISDIR => return error.SQLiteCantOpenIsDir,
|
||||
c.SQLITE_CANTOPEN_FULLPATH => return error.SQLiteCantOpenFullPath,
|
||||
c.SQLITE_CANTOPEN_CONVPATH => return error.SQLiteCantOpenConvPath,
|
||||
|
||||
c.SQLITE_CORRUPT_VTAB => return error.SQLiteCorruptVTab,
|
||||
|
||||
c.SQLITE_READONLY_RECOVERY => return error.SQLiteReadOnlyRecovery,
|
||||
c.SQLITE_READONLY_CANTLOCK => return error.SQLiteReadOnlyCantLock,
|
||||
c.SQLITE_READONLY_ROLLBACK => return error.SQLiteReadOnlyRollback,
|
||||
c.SQLITE_READONLY_DBMOVED => return error.SQLiteReadOnlyDBMoved,
|
||||
|
||||
c.SQLITE_ABORT_ROLLBACK => return error.SQLiteAbortRollback,
|
||||
|
||||
c.SQLITE_CONSTRAINT_CHECK => return error.SQLiteConstraintCheck,
|
||||
c.SQLITE_CONSTRAINT_COMMITHOOK => return error.SQLiteConstraintCommitHook,
|
||||
c.SQLITE_CONSTRAINT_FOREIGNKEY => return error.SQLiteConstraintForeignKey,
|
||||
c.SQLITE_CONSTRAINT_FUNCTION => return error.SQLiteConstraintFunction,
|
||||
c.SQLITE_CONSTRAINT_NOTNULL => return error.SQLiteConstraintNotNull,
|
||||
c.SQLITE_CONSTRAINT_PRIMARYKEY => return error.SQLiteConstraintPrimaryKey,
|
||||
c.SQLITE_CONSTRAINT_TRIGGER => return error.SQLiteConstraintTrigger,
|
||||
c.SQLITE_CONSTRAINT_UNIQUE => return error.SQLiteConstraintUnique,
|
||||
c.SQLITE_CONSTRAINT_VTAB => return error.SQLiteConstraintVTab,
|
||||
c.SQLITE_CONSTRAINT_ROWID => return error.SQLiteConstraintRowID,
|
||||
|
||||
else => std.debug.panic("invalid result code {}", .{code}),
|
||||
}
|
||||
}
|
||||
|
||||
/// DetailedError contains a SQLite error code and error message.
|
||||
pub const DetailedError = struct {
|
||||
code: usize,
|
||||
near: i32,
|
||||
message: []const u8,
|
||||
|
||||
pub fn format(self: @This(), comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
|
||||
_ = fmt;
|
||||
_ = options;
|
||||
|
||||
_ = try writer.print("{{code: {}, near: {d}, message: {s}}}", .{ self.code, self.near, self.message });
|
||||
}
|
||||
};
|
||||
|
||||
pub fn getDetailedErrorFromResultCode(code: c_int) DetailedError {
|
||||
return .{
|
||||
.code = @intCast(usize, code),
|
||||
.near = -1,
|
||||
.message = blk: {
|
||||
const msg = c.sqlite3_errstr(code);
|
||||
break :blk mem.sliceTo(msg, 0);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn getErrorOffset(db: *c.sqlite3) i32 {
|
||||
if (comptime versionGreaterThanOrEqualTo(3, 38, 0)) {
|
||||
return c.sqlite3_error_offset(db);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
pub fn getLastDetailedErrorFromDb(db: *c.sqlite3) DetailedError {
|
||||
return .{
|
||||
.code = @intCast(usize, c.sqlite3_extended_errcode(db)),
|
||||
.near = getErrorOffset(db),
|
||||
.message = blk: {
|
||||
const msg = c.sqlite3_errmsg(db);
|
||||
break :blk mem.sliceTo(msg, 0);
|
||||
},
|
||||
};
|
||||
}
|
59
src/.deps/zig-sqlite/examples/zigcrypto.zig
Normal file
59
src/.deps/zig-sqlite/examples/zigcrypto.zig
Normal file
|
@ -0,0 +1,59 @@
|
|||
const std = @import("std");
|
||||
const Blake3 = std.crypto.hash.Blake3;
|
||||
const Sha3_512 = std.crypto.hash.sha3.Sha3_512;
|
||||
|
||||
const sqlite = @import("sqlite");
|
||||
const c = sqlite.c;
|
||||
|
||||
const name = "zigcrypto";
|
||||
|
||||
pub const loadable_extension = true;
|
||||
|
||||
var module_allocator: std.heap.GeneralPurposeAllocator(.{}) = undefined;
|
||||
var module_context: sqlite.vtab.ModuleContext = undefined;
|
||||
|
||||
const logger = std.log.scoped(.zigcrypto);
|
||||
|
||||
fn createAllFunctions(db: *sqlite.Db) !void {
|
||||
try db.createScalarFunction(
|
||||
"blake3",
|
||||
struct {
|
||||
fn run(input: []const u8) [Blake3.digest_length]u8 {
|
||||
var output: [Blake3.digest_length]u8 = undefined;
|
||||
Blake3.hash(input, output[0..], .{});
|
||||
return output;
|
||||
}
|
||||
}.run,
|
||||
.{},
|
||||
);
|
||||
try db.createScalarFunction(
|
||||
"sha3_512",
|
||||
struct {
|
||||
fn run(input: []const u8) [Sha3_512.digest_length]u8 {
|
||||
var output: [Sha3_512.digest_length]u8 = undefined;
|
||||
Sha3_512.hash(input, output[0..], .{});
|
||||
return output;
|
||||
}
|
||||
}.run,
|
||||
.{},
|
||||
);
|
||||
}
|
||||
|
||||
pub export fn sqlite3_zigcrypto_init(raw_db: *c.sqlite3, err_msg: [*c][*c]u8, api: *c.sqlite3_api_routines) callconv(.C) c_int {
|
||||
_ = err_msg;
|
||||
|
||||
c.sqlite3_api = api;
|
||||
|
||||
module_allocator = std.heap.GeneralPurposeAllocator(.{}){};
|
||||
|
||||
var db = sqlite.Db{
|
||||
.db = raw_db,
|
||||
};
|
||||
|
||||
createAllFunctions(&db) catch |err| {
|
||||
logger.err("unable to create all SQLite functions, err: {!}", .{err});
|
||||
return c.SQLITE_ERROR;
|
||||
};
|
||||
|
||||
return c.SQLITE_OK;
|
||||
}
|
50
src/.deps/zig-sqlite/examples/zigcrypto_test.zig
Normal file
50
src/.deps/zig-sqlite/examples/zigcrypto_test.zig
Normal file
|
@ -0,0 +1,50 @@
|
|||
const std = @import("std");
|
||||
const debug = std.debug;
|
||||
const mem = std.mem;
|
||||
|
||||
const sqlite = @import("sqlite");
|
||||
|
||||
pub fn main() anyerror!void {
|
||||
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
|
||||
var arena = std.heap.ArenaAllocator.init(gpa.allocator());
|
||||
defer arena.deinit();
|
||||
const allocator = arena.allocator();
|
||||
|
||||
//
|
||||
|
||||
var db = try sqlite.Db.init(.{
|
||||
.mode = sqlite.Db.Mode{ .Memory = {} },
|
||||
.open_flags = .{ .write = true },
|
||||
});
|
||||
defer db.deinit();
|
||||
|
||||
{
|
||||
const result = sqlite.c.sqlite3_enable_load_extension(db.db, 1);
|
||||
debug.assert(result == sqlite.c.SQLITE_OK);
|
||||
}
|
||||
|
||||
{
|
||||
var pzErrMsg: [*c]u8 = undefined;
|
||||
const result = sqlite.c.sqlite3_load_extension(db.db, "./zig-out/lib/libzigcrypto", null, &pzErrMsg);
|
||||
if (result != sqlite.c.SQLITE_OK) {
|
||||
const err = sqlite.c.sqlite3_errstr(result);
|
||||
std.debug.panic("unable to load extension, err: {s}, err message: {s}\n", .{ err, std.mem.sliceTo(pzErrMsg, 0) });
|
||||
}
|
||||
}
|
||||
|
||||
var diags = sqlite.Diagnostics{};
|
||||
|
||||
const blake3_digest = db.oneAlloc([]const u8, allocator, "SELECT hex(blake3('foobar'))", .{ .diags = &diags }, .{}) catch |err| {
|
||||
debug.print("unable to get blake3 hash, err: {!}, diags: {s}\n", .{ err, diags });
|
||||
return err;
|
||||
};
|
||||
debug.assert(blake3_digest != null);
|
||||
debug.assert(mem.eql(u8, "AA51DCD43D5C6C5203EE16906FD6B35DB298B9B2E1DE3FCE81811D4806B76B7D", blake3_digest.?));
|
||||
|
||||
const sha3_digest = db.oneAlloc([]const u8, allocator, "SELECT hex(sha3_512('foobar'))", .{ .diags = &diags }, .{}) catch |err| {
|
||||
debug.print("unable to get sha3 hash, err: {!}, diags: {s}\n", .{ err, diags });
|
||||
return err;
|
||||
};
|
||||
debug.assert(sha3_digest != null);
|
||||
debug.assert(mem.eql(u8, "FF32A30C3AF5012EA395827A3E99A13073C3A8D8410A708568FF7E6EB85968FCCFEBAEA039BC21411E9D43FDB9A851B529B9960FFEA8679199781B8F45CA85E2", sha3_digest.?));
|
||||
}
|
201
src/.deps/zig-sqlite/fuzz/LICENSE.apache2
Normal file
201
src/.deps/zig-sqlite/fuzz/LICENSE.apache2
Normal file
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
56
src/.deps/zig-sqlite/fuzz/README.md
Normal file
56
src/.deps/zig-sqlite/fuzz/README.md
Normal file
|
@ -0,0 +1,56 @@
|
|||
# fuzz testing
|
||||
|
||||
This repository contains a binary used for fuzz testing.
|
||||
|
||||
# Acknowledgments
|
||||
|
||||
The fuzz setup with AFL++ comes from [Ryan Liptak's](https://www.ryanliptak.com/blog/fuzzing-zig-code/) blog post. See [this example repo](https://github.com/squeek502/zig-fuzzing-example) too.
|
||||
|
||||
# Prerequisites
|
||||
|
||||
To build the fuzz binary we need the `afl-clang-lto` binary in the system path.
|
||||
The recommended way to get that is to [install AFL++](https://github.com/AFLplusplus/AFLplusplus/blob/stable/docs/INSTALL.md).
|
||||
|
||||
If you don't want to install it system-wide you can also do this instead:
|
||||
```
|
||||
make PREFIX=$HOME/local install
|
||||
```
|
||||
then make sure that `$HOME/local/bin` is in your system path.
|
||||
|
||||
If you installed LLVM from source as described in the [Zig wiki](https://github.com/ziglang/zig/wiki/How-to-build-LLVM,-libclang,-and-liblld-from-source#posix), do this instead:
|
||||
```
|
||||
LLVM_CONFIG=$HOME/local/llvm15-release/bin/llvm-config make PREFIX=$HOME/local install
|
||||
```
|
||||
|
||||
# Build and run
|
||||
|
||||
Once AFL++ is installed, build the fuzz binary:
|
||||
```
|
||||
$ zig build fuzz
|
||||
```
|
||||
|
||||
Finally to run the fuzzer do this:
|
||||
```
|
||||
$ afl-fuzz -i - -o fuzz/outputs -- ./zig-out/bin/fuzz
|
||||
```
|
||||
|
||||
Note that `afl-fuzz` might complain about core dumps being sent to an external utility (usually systemd).
|
||||
|
||||
You'll have to do this as root:
|
||||
```
|
||||
# echo core > /proc/sys/kernel/core_pattern
|
||||
```
|
||||
|
||||
`afl-fuzz` might also complain about the scaling governor, setting `AFL_SKIP_CPUFREQ` as suggested is good enough:
|
||||
```
|
||||
$ AFL_SKIP_CPUFREQ=1 afl-fuzz -i - -o fuzz/outputs -- ./zig-out/bin/fuzz
|
||||
```
|
||||
|
||||
# Debugging a crash
|
||||
|
||||
If `afl-fuzz` finds a crash it will be added to `fuzz/outputs/default/crashes.XYZ`.
|
||||
|
||||
To debug the crash you can run the fuzz binary and giving it the content of the crash via stdin, for example:
|
||||
```
|
||||
$ ./zig-out/bin/fuzz < 'fuzz/outputs/default/crashes.2021-12-31-12:43:12/id:000000,sig:06,src:000004,time:210548,execs:1011599,op:havoc,rep:2'
|
||||
```
|
BIN
src/.deps/zig-sqlite/fuzz/inputs/empty_string_crash
Normal file
BIN
src/.deps/zig-sqlite/fuzz/inputs/empty_string_crash
Normal file
Binary file not shown.
1
src/.deps/zig-sqlite/fuzz/inputs/schema.sql
Normal file
1
src/.deps/zig-sqlite/fuzz/inputs/schema.sql
Normal file
|
@ -0,0 +1 @@
|
|||
CREATE TABLE foobar(user integer primary key, name text, data blob);
|
75
src/.deps/zig-sqlite/fuzz/main.zig
Normal file
75
src/.deps/zig-sqlite/fuzz/main.zig
Normal file
|
@ -0,0 +1,75 @@
|
|||
const std = @import("std");
|
||||
const sqlite = @import("sqlite");
|
||||
|
||||
pub export fn main() callconv(.C) void {
|
||||
zigMain() catch unreachable;
|
||||
}
|
||||
|
||||
pub fn zigMain() !void {
|
||||
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
|
||||
defer std.debug.assert(gpa.deinit() == false);
|
||||
const allocator = gpa.allocator();
|
||||
|
||||
// Read the data from stdin
|
||||
const stdin = std.io.getStdIn();
|
||||
const data = try stdin.readToEndAlloc(allocator, std.math.maxInt(usize));
|
||||
defer allocator.free(data);
|
||||
|
||||
var db = try sqlite.Db.init(.{
|
||||
.mode = .Memory,
|
||||
.open_flags = .{
|
||||
.write = true,
|
||||
.create = true,
|
||||
},
|
||||
});
|
||||
defer db.deinit();
|
||||
|
||||
try db.exec("CREATE TABLE test(id integer primary key, name text, data blob)", .{}, .{});
|
||||
|
||||
db.execDynamic(data, .{}, .{}) catch |err| switch (err) {
|
||||
error.SQLiteError => return,
|
||||
error.ExecReturnedData => return,
|
||||
else => return err,
|
||||
};
|
||||
|
||||
db.execDynamic(
|
||||
"INSERT INTO test(name, data) VALUES($name, $data)",
|
||||
.{},
|
||||
.{
|
||||
.name = data,
|
||||
.data = data,
|
||||
},
|
||||
) catch |err| switch (err) {
|
||||
error.SQLiteError => return,
|
||||
else => return err,
|
||||
};
|
||||
|
||||
var stmt = db.prepareDynamic("SELECT name, data FROM test") catch |err| switch (err) {
|
||||
error.SQLiteError => return,
|
||||
else => return err,
|
||||
};
|
||||
defer stmt.deinit();
|
||||
|
||||
var rows_arena = std.heap.ArenaAllocator.init(allocator);
|
||||
defer rows_arena.deinit();
|
||||
|
||||
const row_opt = stmt.oneAlloc(
|
||||
struct {
|
||||
name: sqlite.Text,
|
||||
data: sqlite.Blob,
|
||||
},
|
||||
rows_arena.allocator(),
|
||||
.{},
|
||||
.{},
|
||||
) catch |err| switch (err) {
|
||||
error.SQLiteError => return,
|
||||
else => return err,
|
||||
};
|
||||
|
||||
if (row_opt) |row| {
|
||||
if (!std.mem.eql(u8, row.name.data, data)) return error.InvalidNameField;
|
||||
if (!std.mem.eql(u8, row.data.data, data)) return error.InvalidDataField;
|
||||
} else {
|
||||
return error.NoRowsFound;
|
||||
}
|
||||
}
|
285
src/.deps/zig-sqlite/fuzz/sql.dict
Normal file
285
src/.deps/zig-sqlite/fuzz/sql.dict
Normal file
|
@ -0,0 +1,285 @@
|
|||
# Taken from https://github.com/google/oss-fuzz/blob/f64134b0b3c2b27a4283e55581d41d83d3c3a64f/projects/sqlite3/sql.dict
|
||||
# This code is licensed under the Apache License 2.0, which you can find in the file LICENSE.apache2
|
||||
# This code is Copyright Google.
|
||||
#
|
||||
# AFL dictionary for SQL
|
||||
# ----------------------
|
||||
#
|
||||
# Modeled based on SQLite documentation, contains some number of SQLite
|
||||
# extensions. Other dialects of SQL may benefit from customized dictionaries.
|
||||
#
|
||||
# If you append @1 to the file name when loading this dictionary, afl-fuzz
|
||||
# will also additionally load a selection of pragma keywords that are very
|
||||
# specific to SQLite (and are probably less interesting from the security
|
||||
# standpoint, because they are usually not allowed in non-privileged
|
||||
# contexts).
|
||||
#
|
||||
# Created by Michal Zalewski <lcamtuf@google.com>
|
||||
#
|
||||
|
||||
function_abs=" abs(1)"
|
||||
function_avg=" avg(1)"
|
||||
function_changes=" changes()"
|
||||
function_char=" char(1)"
|
||||
function_coalesce=" coalesce(1,1)"
|
||||
function_count=" count(1)"
|
||||
function_date=" date(1,1,1)"
|
||||
function_datetime=" datetime(1,1,1)"
|
||||
function_decimal=" decimal(1,1)"
|
||||
function_glob=" glob(1,1)"
|
||||
function_group_concat=" group_concat(1,1)"
|
||||
function_hex=" hex(1)"
|
||||
function_ifnull=" ifnull(1,1)"
|
||||
function_instr=" instr(1,1)"
|
||||
function_julianday=" julianday(1,1,1)"
|
||||
function_last_insert_rowid=" last_insert_rowid()"
|
||||
function_length=" length(1)"
|
||||
function_like=" like(1,1)"
|
||||
function_likelihood=" likelihood(1,1)"
|
||||
function_likely=" likely(1)"
|
||||
function_load_extension=" load_extension(1,1)"
|
||||
function_lower=" lower(1)"
|
||||
function_ltrim=" ltrim(1,1)"
|
||||
function_max=" max(1,1)"
|
||||
function_min=" min(1,1)"
|
||||
function_nullif=" nullif(1,1)"
|
||||
function_printf=" printf(1,1)"
|
||||
function_quote=" quote(1)"
|
||||
function_random=" random()"
|
||||
function_randomblob=" randomblob(1)"
|
||||
function_replace=" replace(1,1,1)"
|
||||
function_round=" round(1,1)"
|
||||
function_rtrim=" rtrim(1,1)"
|
||||
function_soundex=" soundex(1)"
|
||||
function_sqlite_compileoption_get=" sqlite_compileoption_get(1)"
|
||||
function_sqlite_compileoption_used=" sqlite_compileoption_used(1)"
|
||||
function_sqlite_source_id=" sqlite_source_id()"
|
||||
function_sqlite_version=" sqlite_version()"
|
||||
function_strftime=" strftime(1,1,1,1)"
|
||||
function_substr=" substr(1,1,1)"
|
||||
function_sum=" sum(1)"
|
||||
function_time=" time(1,1,1)"
|
||||
function_total=" total(1)"
|
||||
function_total_changes=" total_changes()"
|
||||
function_trim=" trim(1,1)"
|
||||
function_typeof=" typeof(1)"
|
||||
function_unicode=" unicode(1)"
|
||||
function_unlikely=" unlikely(1)"
|
||||
function_upper=" upper(1)"
|
||||
function_varchar=" varchar(1)"
|
||||
function_zeroblob=" zeroblob(1)"
|
||||
|
||||
keyword_ABORT="ABORT"
|
||||
keyword_ACTION="ACTION"
|
||||
keyword_ADD="ADD"
|
||||
keyword_AFTER="AFTER"
|
||||
keyword_ALL="ALL"
|
||||
keyword_ALTER="ALTER"
|
||||
keyword_ANALYZE="ANALYZE"
|
||||
keyword_AND="AND"
|
||||
keyword_AS="AS"
|
||||
keyword_ASC="ASC"
|
||||
keyword_ATTACH="ATTACH"
|
||||
keyword_AUTOINCREMENT="AUTOINCREMENT"
|
||||
keyword_BEFORE="BEFORE"
|
||||
keyword_BEGIN="BEGIN"
|
||||
keyword_BETWEEN="BETWEEN"
|
||||
keyword_BY="BY"
|
||||
keyword_CASCADE="CASCADE"
|
||||
keyword_CASE="CASE"
|
||||
keyword_CAST="CAST"
|
||||
keyword_CHECK="CHECK"
|
||||
keyword_COLLATE="COLLATE"
|
||||
keyword_COLUMN="COLUMN"
|
||||
keyword_COMMIT="COMMIT"
|
||||
keyword_CONFLICT="CONFLICT"
|
||||
keyword_CONSTRAINT="CONSTRAINT"
|
||||
keyword_CREATE="CREATE"
|
||||
keyword_CROSS="CROSS"
|
||||
keyword_CURRENT_DATE="CURRENT_DATE"
|
||||
keyword_CURRENT_TIME="CURRENT_TIME"
|
||||
keyword_CURRENT_TIMESTAMP="CURRENT_TIMESTAMP"
|
||||
keyword_DATABASE="DATABASE"
|
||||
keyword_DEFAULT="DEFAULT"
|
||||
keyword_DEFERRABLE="DEFERRABLE"
|
||||
keyword_DEFERRED="DEFERRED"
|
||||
keyword_DELETE="DELETE"
|
||||
keyword_DESC="DESC"
|
||||
keyword_DETACH="DETACH"
|
||||
keyword_DISTINCT="DISTINCT"
|
||||
keyword_DROP="DROP"
|
||||
keyword_EACH="EACH"
|
||||
keyword_ELSE="ELSE"
|
||||
keyword_END="END"
|
||||
keyword_ESCAPE="ESCAPE"
|
||||
keyword_EXCEPT="EXCEPT"
|
||||
keyword_EXCLUSIVE="EXCLUSIVE"
|
||||
keyword_EXISTS="EXISTS"
|
||||
keyword_EXPLAIN="EXPLAIN"
|
||||
keyword_FAIL="FAIL"
|
||||
keyword_FOR="FOR"
|
||||
keyword_FOREIGN="FOREIGN"
|
||||
keyword_FROM="FROM"
|
||||
keyword_FULL="FULL"
|
||||
keyword_GLOB="GLOB"
|
||||
keyword_GROUP="GROUP"
|
||||
keyword_HAVING="HAVING"
|
||||
keyword_IF="IF"
|
||||
keyword_IGNORE="IGNORE"
|
||||
keyword_IMMEDIATE="IMMEDIATE"
|
||||
keyword_IN="IN"
|
||||
keyword_INDEX="INDEX"
|
||||
keyword_INDEXED="INDEXED"
|
||||
keyword_INITIALLY="INITIALLY"
|
||||
keyword_INNER="INNER"
|
||||
keyword_INSERT="INSERT"
|
||||
keyword_INSTEAD="INSTEAD"
|
||||
keyword_INTERSECT="INTERSECT"
|
||||
keyword_INTO="INTO"
|
||||
keyword_IS="IS"
|
||||
keyword_ISNULL="ISNULL"
|
||||
keyword_JOIN="JOIN"
|
||||
keyword_KEY="KEY"
|
||||
keyword_LEFT="LEFT"
|
||||
keyword_LIKE="LIKE"
|
||||
keyword_LIMIT="LIMIT"
|
||||
keyword_MATCH="MATCH"
|
||||
keyword_NATURAL="NATURAL"
|
||||
keyword_NO="NO"
|
||||
keyword_NOT="NOT"
|
||||
keyword_NOTNULL="NOTNULL"
|
||||
keyword_NULL="NULL"
|
||||
keyword_OF="OF"
|
||||
keyword_OFFSET="OFFSET"
|
||||
keyword_ON="ON"
|
||||
keyword_OR="OR"
|
||||
keyword_ORDER="ORDER"
|
||||
keyword_OUTER="OUTER"
|
||||
keyword_PLAN="PLAN"
|
||||
keyword_PRAGMA="PRAGMA"
|
||||
keyword_PRIMARY="PRIMARY"
|
||||
keyword_QUERY="QUERY"
|
||||
keyword_RAISE="RAISE"
|
||||
keyword_RECURSIVE="RECURSIVE"
|
||||
keyword_REFERENCES="REFERENCES"
|
||||
#keyword_REGEXP="REGEXP"
|
||||
keyword_REINDEX="REINDEX"
|
||||
keyword_RELEASE="RELEASE"
|
||||
keyword_RENAME="RENAME"
|
||||
keyword_REPLACE="REPLACE"
|
||||
keyword_RESTRICT="RESTRICT"
|
||||
keyword_RIGHT="RIGHT"
|
||||
keyword_ROLLBACK="ROLLBACK"
|
||||
keyword_ROW="ROW"
|
||||
keyword_SAVEPOINT="SAVEPOINT"
|
||||
keyword_SELECT="SELECT"
|
||||
keyword_SET="SET"
|
||||
keyword_TABLE="TABLE"
|
||||
keyword_TEMP="TEMP"
|
||||
keyword_TEMPORARY="TEMPORARY"
|
||||
keyword_THEN="THEN"
|
||||
keyword_TO="TO"
|
||||
keyword_TRANSACTION="TRANSACTION"
|
||||
keyword_TRIGGER="TRIGGER"
|
||||
keyword_UNION="UNION"
|
||||
keyword_UNIQUE="UNIQUE"
|
||||
keyword_UPDATE="UPDATE"
|
||||
keyword_USING="USING"
|
||||
keyword_VACUUM="VACUUM"
|
||||
keyword_VALUES="VALUES"
|
||||
keyword_VIEW="VIEW"
|
||||
keyword_VIRTUAL="VIRTUAL"
|
||||
keyword_WHEN="WHEN"
|
||||
keyword_WHERE="WHERE"
|
||||
keyword_WITH="WITH"
|
||||
keyword_WITHOUT="WITHOUT"
|
||||
|
||||
operator_concat=" || "
|
||||
operator_above_eq=" >="
|
||||
|
||||
snippet_1eq1=" 1=1"
|
||||
snippet_at=" @1"
|
||||
snippet_backticks=" `a`"
|
||||
snippet_blob=" blob"
|
||||
snippet_brackets=" [a]"
|
||||
snippet_colon=" :1"
|
||||
snippet_comment=" /* */"
|
||||
snippet_date="2001-01-01"
|
||||
snippet_dollar=" $1"
|
||||
snippet_dotref=" a.b"
|
||||
snippet_fmtY="%Y"
|
||||
snippet_int=" int"
|
||||
snippet_neg1=" -1"
|
||||
snippet_pair=" a,b"
|
||||
snippet_parentheses=" (1)"
|
||||
snippet_plus2days="+2 days"
|
||||
snippet_qmark=" ?1"
|
||||
snippet_semicolon=" ;"
|
||||
snippet_star=" *"
|
||||
snippet_string_pair=" \"a\",\"b\""
|
||||
|
||||
string_dbl_q=" \"a\""
|
||||
string_escaped_q=" 'a''b'"
|
||||
string_single_q=" 'a'"
|
||||
|
||||
pragma_application_id@1=" application_id"
|
||||
pragma_auto_vacuum@1=" auto_vacuum"
|
||||
pragma_automatic_index@1=" automatic_index"
|
||||
pragma_busy_timeout@1=" busy_timeout"
|
||||
pragma_cache_size@1=" cache_size"
|
||||
pragma_cache_spill@1=" cache_spill"
|
||||
pragma_case_sensitive_like@1=" case_sensitive_like"
|
||||
pragma_checkpoint_fullfsync@1=" checkpoint_fullfsync"
|
||||
pragma_collation_list@1=" collation_list"
|
||||
pragma_compile_options@1=" compile_options"
|
||||
pragma_count_changes@1=" count_changes"
|
||||
pragma_data_store_directory@1=" data_store_directory"
|
||||
pragma_database_list@1=" database_list"
|
||||
pragma_default_cache_size@1=" default_cache_size"
|
||||
pragma_defer_foreign_keys@1=" defer_foreign_keys"
|
||||
pragma_empty_result_callbacks@1=" empty_result_callbacks"
|
||||
pragma_encoding@1=" encoding"
|
||||
pragma_foreign_key_check@1=" foreign_key_check"
|
||||
pragma_foreign_key_list@1=" foreign_key_list"
|
||||
pragma_foreign_keys@1=" foreign_keys"
|
||||
pragma_freelist_count@1=" freelist_count"
|
||||
pragma_full_column_names@1=" full_column_names"
|
||||
pragma_fullfsync@1=" fullfsync"
|
||||
pragma_ignore_check_constraints@1=" ignore_check_constraints"
|
||||
pragma_incremental_vacuum@1=" incremental_vacuum"
|
||||
pragma_index_info@1=" index_info"
|
||||
pragma_index_list@1=" index_list"
|
||||
pragma_integrity_check@1=" integrity_check"
|
||||
pragma_journal_mode@1=" journal_mode"
|
||||
pragma_journal_size_limit@1=" journal_size_limit"
|
||||
pragma_legacy_file_format@1=" legacy_file_format"
|
||||
pragma_locking_mode@1=" locking_mode"
|
||||
pragma_max_page_count@1=" max_page_count"
|
||||
pragma_mmap_size@1=" mmap_size"
|
||||
pragma_page_count@1=" page_count"
|
||||
pragma_page_size@1=" page_size"
|
||||
pragma_parser_trace@1=" parser_trace"
|
||||
pragma_query_only@1=" query_only"
|
||||
pragma_quick_check@1=" quick_check"
|
||||
pragma_read_uncommitted@1=" read_uncommitted"
|
||||
pragma_recursive_triggers@1=" recursive_triggers"
|
||||
pragma_reverse_unordered_selects@1=" reverse_unordered_selects"
|
||||
pragma_schema_version@1=" schema_version"
|
||||
pragma_secure_delete@1=" secure_delete"
|
||||
pragma_short_column_names@1=" short_column_names"
|
||||
pragma_shrink_memory@1=" shrink_memory"
|
||||
pragma_soft_heap_limit@1=" soft_heap_limit"
|
||||
pragma_stats@1=" stats"
|
||||
pragma_synchronous@1=" synchronous"
|
||||
pragma_table_info@1=" table_info"
|
||||
pragma_temp_store@1=" temp_store"
|
||||
pragma_temp_store_directory@1=" temp_store_directory"
|
||||
pragma_threads@1=" threads"
|
||||
pragma_user_version@1=" user_version"
|
||||
pragma_vdbe_addoptrace@1=" vdbe_addoptrace"
|
||||
pragma_vdbe_debug@1=" vdbe_debug"
|
||||
pragma_vdbe_listing@1=" vdbe_listing"
|
||||
pragma_vdbe_trace@1=" vdbe_trace"
|
||||
pragma_wal_autocheckpoint@1=" wal_autocheckpoint"
|
||||
pragma_wal_checkpoint@1=" wal_checkpoint"
|
||||
pragma_writable_schema@1=" writable_schema"
|
88
src/.deps/zig-sqlite/helpers.zig
Normal file
88
src/.deps/zig-sqlite/helpers.zig
Normal file
|
@ -0,0 +1,88 @@
|
|||
const std = @import("std");
|
||||
const debug = std.debug;
|
||||
|
||||
const c = @import("c.zig").c;
|
||||
|
||||
const Blob = @import("sqlite.zig").Blob;
|
||||
const Text = @import("sqlite.zig").Text;
|
||||
|
||||
/// Sets the result of a function call in the context `ctx`.
///
/// Determines at compile time which sqlite3_result_XYZ function to use based on the type of `result`.
/// Unsupported result types are rejected with a compile error.
pub fn setResult(ctx: ?*c.sqlite3_context, result: anytype) void {
    const ResultType = @TypeOf(result);

    switch (ResultType) {
        // Text and Blob wrap a byte slice in their `data` field.
        Text => c.sqlite3_result_text(ctx, result.data.ptr, @intCast(c_int, result.data.len), c.SQLITE_TRANSIENT),
        Blob => c.sqlite3_result_blob(ctx, result.data.ptr, @intCast(c_int, result.data.len), c.SQLITE_TRANSIENT),
        else => switch (@typeInfo(ResultType)) {
            // Unsigned types need one extra bit to fit sqlite's signed 32/64-bit
            // integers, hence the +1 when signedness is .unsigned.
            .Int => |info| if ((info.bits + if (info.signedness == .unsigned) 1 else 0) <= 32) {
                c.sqlite3_result_int(ctx, result);
            } else if ((info.bits + if (info.signedness == .unsigned) 1 else 0) <= 64) {
                c.sqlite3_result_int64(ctx, result);
            } else {
                @compileError("integer " ++ @typeName(ResultType) ++ " is not representable in sqlite");
            },
            .Float => c.sqlite3_result_double(ctx, result),
            // sqlite has no boolean type; store it as 0/1.
            .Bool => c.sqlite3_result_int(ctx, if (result) 1 else 0),
            // A fixed-size u8 array is stored as a blob.
            .Array => |arr| switch (arr.child) {
                u8 => c.sqlite3_result_blob(ctx, &result, arr.len, c.SQLITE_TRANSIENT),
                else => @compileError("cannot use a result of type " ++ @typeName(ResultType)),
            },
            // A u8 slice is stored as text.
            .Pointer => |ptr| switch (ptr.size) {
                .Slice => switch (ptr.child) {
                    u8 => c.sqlite3_result_text(ctx, result.ptr, @intCast(c_int, result.len), c.SQLITE_TRANSIENT),
                    else => @compileError("cannot use a result of type " ++ @typeName(ResultType)),
                },
                else => @compileError("cannot use a result of type " ++ @typeName(ResultType)),
            },
            else => @compileError("cannot use a result of type " ++ @typeName(ResultType)),
        },
    }
}
|
||||
|
||||
/// Sets a type using the provided value.
///
/// Determines at compile time which sqlite3_value_XYZ function to use based on the type `ArgType`.
/// Unsupported argument types are rejected with a compile error.
pub fn setTypeFromValue(comptime ArgType: type, arg: *ArgType, sqlite_value: *c.sqlite3_value) void {
    switch (ArgType) {
        // Text and Blob both wrap a byte slice; the slice points into
        // sqlite-owned memory (see sliceFromValue).
        Text => arg.*.data = sliceFromValue(sqlite_value),
        Blob => arg.*.data = sliceFromValue(sqlite_value),
        else => switch (@typeInfo(ArgType)) {
            // Unsigned types need one extra bit to fit sqlite's signed 32/64-bit
            // integers, hence the +1 when signedness is .unsigned.
            .Int => |info| if ((info.bits + if (info.signedness == .unsigned) 1 else 0) <= 32) {
                const value = c.sqlite3_value_int(sqlite_value);
                arg.* = @intCast(ArgType, value);
            } else if ((info.bits + if (info.signedness == .unsigned) 1 else 0) <= 64) {
                const value = c.sqlite3_value_int64(sqlite_value);
                arg.* = @intCast(ArgType, value);
            } else {
                @compileError("integer " ++ @typeName(ArgType) ++ " is not representable in sqlite");
            },
            .Float => {
                const value = c.sqlite3_value_double(sqlite_value);
                arg.* = @floatCast(ArgType, value);
            },
            // sqlite has no boolean type; any positive integer counts as true.
            .Bool => {
                const value = c.sqlite3_value_int(sqlite_value);
                arg.* = value > 0;
            },
            .Pointer => |ptr| switch (ptr.size) {
                .Slice => switch (ptr.child) {
                    u8 => arg.* = sliceFromValue(sqlite_value),
                    else => @compileError("cannot use an argument of type " ++ @typeName(ArgType)),
                },
                else => @compileError("cannot use an argument of type " ++ @typeName(ArgType)),
            },
            else => @compileError("cannot use an argument of type " ++ @typeName(ArgType)),
        },
    }
}
|
||||
|
||||
/// Returns the contents of `sqlite_value` as a byte slice.
///
/// Per the SQLite documentation, sqlite3_value_text must be called *before*
/// sqlite3_value_bytes: fetching the text may trigger a type conversion which
/// would invalidate a previously obtained length. The original order
/// (bytes first, then text) was therefore unsafe and has been swapped.
///
/// The returned slice points into sqlite-owned memory and is only valid until
/// the value is converted or destroyed.
fn sliceFromValue(sqlite_value: *c.sqlite3_value) []const u8 {
    const value = c.sqlite3_value_text(sqlite_value);
    debug.assert(value != null); // TODO(vincent): how do we handle this properly ?

    const size = @intCast(usize, c.sqlite3_value_bytes(sqlite_value));

    return value[0..size];
}
|
406
src/.deps/zig-sqlite/query.zig
Normal file
406
src/.deps/zig-sqlite/query.zig
Normal file
|
@ -0,0 +1,406 @@
|
|||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const testing = std.testing;
|
||||
|
||||
const Blob = @import("sqlite.zig").Blob;
|
||||
const Text = @import("sqlite.zig").Text;
|
||||
|
||||
/// Describes a single bind marker (`?`, `?NNN`, `:name`, `@name`, `$name`)
/// found while parsing a query at comptime.
const BindMarker = struct {
    /// Contains the expected type for a bind parameter which will be checked
    /// at comptime when calling bind on a statement.
    ///
    /// A null means the bind parameter is untyped so there won't be comptime checking.
    typed: ?type = null,
};
|
||||
|
||||
/// Returns true if `c` may appear in a named bind parameter identifier:
/// ASCII letters, digits, or underscore.
fn isNamedIdentifierChar(c: u8) bool {
    return switch (c) {
        'a'...'z', 'A'...'Z', '0'...'9', '_' => true,
        else => false,
    };
}
|
||||
|
||||
/// Parses `query` at comptime.
///
/// Extracts the typed bind markers (e.g. `?{u32}`, `:name{text}`) and produces
/// the cleaned SQL — the `{...}` type suffixes removed — together with the list
/// of bind markers found, as a ParsedQueryState.
///
/// Fix: the diagnostic in the `.bind_marker_identifier` state was misspelled
/// ("unregconised multiple ..."); it now matches the wording of the identical
/// check in the `.bind_marker` state.
pub fn ParsedQuery(comptime query: []const u8) ParsedQueryState(query.len) {
    // This contains the final SQL query after parsing with our
    // own typed bind markers removed.
    comptime var buf: [query.len]u8 = undefined;
    comptime var pos = 0;
    comptime var state = .start;

    // Scratch buffer accumulating the characters between '{' and '}' of the
    // marker currently being parsed. These characters are never copied into
    // `buf`, which is how the type suffix gets stripped from the final query.
    comptime var current_bind_marker_type: [256]u8 = undefined;
    comptime var current_bind_marker_type_pos = 0;

    // becomes part of our result
    comptime var bind_markers: [128]BindMarker = undefined;
    comptime var nb_bind_markers: usize = 0;

    inline for (query) |c| {
        switch (state) {
            .start => switch (c) {
                '?', ':', '@', '$' => {
                    bind_markers[nb_bind_markers] = BindMarker{};
                    current_bind_marker_type_pos = 0;
                    state = .bind_marker;
                    buf[pos] = c;
                    pos += 1;
                },
                '\'', '"' => {
                    state = .inside_string;
                    buf[pos] = c;
                    pos += 1;
                },
                else => {
                    buf[pos] = c;
                    pos += 1;
                },
            },
            // Inside a string literal the marker prefix characters have no
            // special meaning; copy bytes until the closing quote.
            .inside_string => switch (c) {
                '\'', '"' => {
                    state = .start;
                    buf[pos] = c;
                    pos += 1;
                },
                else => {
                    buf[pos] = c;
                    pos += 1;
                },
            },
            .bind_marker => switch (c) {
                '?', ':', '@', '$' => @compileError("invalid multiple '?', ':', '$' or '@'."),
                '{' => {
                    state = .bind_marker_type;
                },
                else => {
                    if (isNamedIdentifierChar(c)) {
                        // This is the start of a named bind marker.
                        state = .bind_marker_identifier;
                    } else {
                        // This is a unnamed, untyped bind marker.
                        state = .start;

                        bind_markers[nb_bind_markers].typed = null;
                        nb_bind_markers += 1;
                    }
                    buf[pos] = c;
                    pos += 1;
                },
            },
            .bind_marker_identifier => switch (c) {
                '?', ':', '@', '$' => @compileError("invalid multiple '?', ':', '$' or '@'."),
                '{' => {
                    state = .bind_marker_type;
                    current_bind_marker_type_pos = 0;
                },
                else => {
                    if (!isNamedIdentifierChar(c)) {
                        // This marks the end of the named bind marker.
                        state = .start;
                        nb_bind_markers += 1;
                    }
                    buf[pos] = c;
                    pos += 1;
                },
            },
            .bind_marker_type => switch (c) {
                '}' => {
                    state = .start;

                    const type_info_string = current_bind_marker_type[0..current_bind_marker_type_pos];
                    // Handles optional types: a leading '?' wraps the parsed
                    // child type in an optional.
                    const typ = if (type_info_string[0] == '?') blk: {
                        const child_type = ParseType(type_info_string[1..]);
                        break :blk @Type(std.builtin.Type{
                            .Optional = .{
                                .child = child_type,
                            },
                        });
                    } else blk: {
                        break :blk ParseType(type_info_string);
                    };

                    bind_markers[nb_bind_markers].typed = typ;
                    nb_bind_markers += 1;
                },
                else => {
                    current_bind_marker_type[current_bind_marker_type_pos] = c;
                    current_bind_marker_type_pos += 1;
                },
            },
            else => {
                @compileError("invalid state " ++ @tagName(state));
            },
        }
    }

    // The last character was a bind marker prefix so this must be an untyped bind marker.
    switch (state) {
        .bind_marker => {
            bind_markers[nb_bind_markers].typed = null;
            nb_bind_markers += 1;
        },
        .bind_marker_identifier => {
            nb_bind_markers += 1;
        },
        .start => {},
        else => @compileError("invalid final state " ++ @tagName(state) ++ ", this means you wrote an incomplete bind marker type"),
    }

    var parsed_state = ParsedQueryState(query.len){
        .bind_markers = bind_markers,
        .nb_bind_markers = nb_bind_markers,
        .query = undefined,
        .query_len = pos,
    };

    std.mem.copy(u8, &parsed_state.query, &buf);

    return parsed_state;
}
|
||||
|
||||
/// Result of ParsedQuery: the bind markers found plus the cleaned SQL text.
///
/// The query is stored inline in a fixed buffer of `max_query_len` bytes so
/// the whole value can exist at comptime; `query_len` gives the used prefix.
pub fn ParsedQueryState(comptime max_query_len: usize) type {
    return struct {
        const Self = @This();

        bind_markers: [128]BindMarker,
        nb_bind_markers: usize,
        query: [max_query_len]u8,
        query_len: usize,

        /// Returns the cleaned SQL as a slice of the inline buffer.
        pub fn getQuery(comptime self: *const Self) []const u8 {
            return self.query[0..self.query_len];
        }
    };
}
|
||||
|
||||
/// Resolves the textual type of a typed bind marker (the part between '{' and
/// '}') into the corresponding Zig type.
///
/// Supported: usize/isize, arbitrary-width integers (uN/iN), f16/f32/f64/f128,
/// bool, []const u8 / []u8, and the sqlite wrapper types `text` and `blob`.
/// Anything else is a compile error.
fn ParseType(comptime type_info: []const u8) type {
    if (type_info.len == 0) @compileError("invalid type info " ++ type_info);

    // Exact names first. None of these (except usize/isize, checked before the
    // prefix parse below) start with 'u' or 'i', so ordering is safe.
    if (mem.eql(u8, "usize", type_info)) return usize;
    if (mem.eql(u8, "isize", type_info)) return isize;
    if (mem.eql(u8, "f16", type_info)) return f16;
    if (mem.eql(u8, "f32", type_info)) return f32;
    if (mem.eql(u8, "f64", type_info)) return f64;
    if (mem.eql(u8, "f128", type_info)) return f128;
    if (mem.eql(u8, "bool", type_info)) return bool;
    if (mem.eql(u8, "text", type_info)) return Text;
    if (mem.eql(u8, "blob", type_info)) return Blob;
    if (mem.eql(u8, "[]const u8", type_info) or mem.eql(u8, "[]u8", type_info)) {
        return []const u8;
    }

    // Arbitrary-width integer: 'u' or 'i' followed by a decimal bit count.
    if (type_info[0] == 'u' or type_info[0] == 'i') {
        return @Type(std.builtin.Type{
            .Int = std.builtin.Type.Int{
                .signedness = if (type_info[0] == 'i') .signed else .unsigned,
                .bits = std.fmt.parseInt(usize, type_info[1..type_info.len], 10) catch {
                    @compileError("invalid type info " ++ type_info);
                },
            },
        });
    }

    @compileError("invalid type info " ++ type_info);
}
|
||||
|
||||
// Checks that the {type} suffixes of typed bind markers are stripped from the
// final SQL, and that untyped queries pass through unchanged.
test "parsed query: query" {
    const testCase = struct {
        query: []const u8,
        expected_query: []const u8,
    };

    const testCases = &[_]testCase{
        .{
            .query = "INSERT INTO user(id, name, age) VALUES(?{usize}, ?{[]const u8}, ?{u32})",
            .expected_query = "INSERT INTO user(id, name, age) VALUES(?, ?, ?)",
        },
        .{
            .query = "SELECT id, name, age FROM user WHER age > ?{u32} AND age < ?{u32}",
            .expected_query = "SELECT id, name, age FROM user WHER age > ? AND age < ?",
        },
        .{
            .query = "SELECT id, name, age FROM user WHER age > ? AND age < ?",
            .expected_query = "SELECT id, name, age FROM user WHER age > ? AND age < ?",
        },
    };

    inline for (testCases) |tc| {
        // Comptime parsing of each query needs a generous eval branch quota.
        @setEvalBranchQuota(100000);
        comptime var parsed_query = ParsedQuery(tc.query);
        try testing.expectEqualStrings(tc.expected_query, parsed_query.getQuery());
    }
}
|
||||
|
||||
// Checks that every supported marker prefix syntax (unnamed and named) parses
// into exactly one bind marker with the expected comptime type.
//
// Fix: the nb_bind_markers expectation now uses @as(usize, 1) for the expected
// value, consistent with the other tests in this file, instead of a bare
// comptime_int literal.
test "parsed query: bind markers types" {
    const testCase = struct {
        query: []const u8,
        expected_marker: BindMarker,
    };

    const prefixes = &[_][]const u8{
        "?",
        "?123",
        ":",
        ":hello",
        "$",
        "$foobar",
        "@",
        "@name",
    };

    inline for (prefixes) |prefix| {
        const testCases = &[_]testCase{
            .{
                .query = "foobar " ++ prefix ++ "{usize}",
                .expected_marker = .{ .typed = usize },
            },
            .{
                .query = "foobar " ++ prefix ++ "{text}",
                .expected_marker = .{ .typed = Text },
            },
            .{
                .query = "foobar " ++ prefix ++ "{blob}",
                .expected_marker = .{ .typed = Blob },
            },
            .{
                .query = "foobar " ++ prefix,
                .expected_marker = .{ .typed = null },
            },
            .{
                .query = "foobar " ++ prefix ++ "{?[]const u8}",
                .expected_marker = .{ .typed = ?[]const u8 },
            },
        };

        inline for (testCases) |tc| {
            @setEvalBranchQuota(100000);
            comptime var parsed_query = ParsedQuery(tc.query);

            try testing.expectEqual(@as(usize, 1), parsed_query.nb_bind_markers);

            const bind_marker = parsed_query.bind_markers[0];
            try testing.expectEqual(tc.expected_marker.typed, bind_marker.typed);
        }
    }
}
|
||||
|
||||
// Checks named/numbered bind markers: typed markers record their type, and
// markers without a {type} suffix produce a default (untyped) BindMarker.
test "parsed query: bind markers identifier" {
    const testCase = struct {
        query: []const u8,
        expected_marker: BindMarker,
    };

    const testCases = &[_]testCase{
        .{
            .query = "foobar @ABC{usize}",
            .expected_marker = .{ .typed = usize },
        },
        .{
            .query = "foobar ?123{text}",
            .expected_marker = .{ .typed = Text },
        },
        .{
            .query = "foobar $abc{blob}",
            .expected_marker = .{ .typed = Blob },
        },
        .{
            .query = "foobar :430{u32}",
            .expected_marker = .{ .typed = u32 },
        },
        .{
            .query = "foobar ?123",
            .expected_marker = .{},
        },
        .{
            .query = "foobar :hola",
            .expected_marker = .{},
        },
        .{
            .query = "foobar @foo",
            .expected_marker = .{},
        },
    };

    inline for (testCases) |tc| {
        comptime var parsed_query = ParsedQuery(tc.query);

        try testing.expectEqual(@as(usize, 1), parsed_query.nb_bind_markers);

        const bind_marker = parsed_query.bind_markers[0];
        try testing.expectEqual(tc.expected_marker, bind_marker);
    }
}
|
||||
|
||||
// Checks that named markers keep their identifiers in the cleaned SQL (only
// the {type} suffix is removed) and that the marker count is correct.
test "parsed query: query bind identifier" {
    const testCase = struct {
        query: []const u8,
        expected_query: []const u8,
        expected_nb_bind_markers: usize,
    };

    const testCases = &[_]testCase{
        .{
            .query = "INSERT INTO user(id, name, age) VALUES(@id{usize}, :name{[]const u8}, $age{u32})",
            .expected_query = "INSERT INTO user(id, name, age) VALUES(@id, :name, $age)",
            .expected_nb_bind_markers = 3,
        },
        .{
            .query = "INSERT INTO user(id, name, age) VALUES($id, $name, $age)",
            .expected_query = "INSERT INTO user(id, name, age) VALUES($id, $name, $age)",
            .expected_nb_bind_markers = 3,
        },
        .{
            .query = "SELECT id, name, age FROM user WHER age > :ageGT{u32} AND age < @ageLT{u32}",
            .expected_query = "SELECT id, name, age FROM user WHER age > :ageGT AND age < @ageLT",
            .expected_nb_bind_markers = 2,
        },
        .{
            .query = "SELECT id, name, age FROM user WHER age > :ageGT AND age < $ageLT",
            .expected_query = "SELECT id, name, age FROM user WHER age > :ageGT AND age < $ageLT",
            .expected_nb_bind_markers = 2,
        },
        .{
            .query = "SELECT id, name, age FROM user WHER age > $my_age{i32} AND age < :your_age{i32}",
            .expected_query = "SELECT id, name, age FROM user WHER age > $my_age AND age < :your_age",
            .expected_nb_bind_markers = 2,
        },
    };

    inline for (testCases) |tc| {
        @setEvalBranchQuota(100000);
        comptime var parsed_query = ParsedQuery(tc.query);
        try testing.expectEqualStrings(tc.expected_query, parsed_query.getQuery());
        try testing.expectEqual(tc.expected_nb_bind_markers, parsed_query.nb_bind_markers);
    }
}
|
||||
|
||||
// Checks that marker prefix characters ('$' here) inside string literals are
// treated as plain text and do not produce bind markers.
test "parsed query: bind marker character inside string" {
    const testCase = struct {
        query: []const u8,
        exp_bind_markers: comptime_int,
        exp: []const u8,
    };

    const testCases = &[_]testCase{
        .{
            .query = "SELECT json_extract(metadata, '$.name') AS name FROM foobar",
            .exp_bind_markers = 0,
            .exp = "SELECT json_extract(metadata, '$.name') AS name FROM foobar",
        },
        .{
            .query = "SELECT json_extract(metadata, '$.name') AS name FROM foobar WHERE name = $name{text}",
            .exp_bind_markers = 1,
            .exp = "SELECT json_extract(metadata, '$.name') AS name FROM foobar WHERE name = $name",
        },
    };

    inline for (testCases) |tc| {
        @setEvalBranchQuota(100000);
        comptime var parsed_query = ParsedQuery(tc.query);

        try testing.expectEqual(@as(usize, tc.exp_bind_markers), parsed_query.nb_bind_markers);
        try testing.expectEqualStrings(tc.exp, parsed_query.getQuery());
    }
}
|
3973
src/.deps/zig-sqlite/sqlite.zig
Normal file
3973
src/.deps/zig-sqlite/sqlite.zig
Normal file
File diff suppressed because it is too large
Load Diff
50
src/.deps/zig-sqlite/test.zig
Normal file
50
src/.deps/zig-sqlite/test.zig
Normal file
|
@ -0,0 +1,50 @@
|
|||
const std = @import("std");
|
||||
const build_options = @import("build_options");
|
||||
const mem = std.mem;
|
||||
const testing = std.testing;
|
||||
|
||||
const Db = @import("sqlite.zig").Db;
|
||||
|
||||
/// Opens the database used by the test suite.
///
/// The mode (in-memory vs file) comes from build options via dbMode; the path
/// is allocated from a small stack buffer since it is consumed immediately.
///
/// Fix: `mode` was declared `var` but never mutated — `const` by default.
pub fn getTestDb() !Db {
    var buf: [1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buf);

    const mode = dbMode(fba.allocator());

    return try Db.init(.{
        .open_flags = .{
            .write = true,
            .create = true,
        },
        .mode = mode,
    });
}
|
||||
|
||||
/// Builds a null-terminated path to a database file inside a fresh test
/// temporary directory (zig-cache/tmp/<sub_path>/zig-sqlite.db).
///
/// Caller owns the returned slice.
/// NOTE(review): the TmpDir returned by testing.tmpDir is never cleaned up or
/// closed here, so its directory handles outlive the call — confirm this is
/// intentional (the directory must persist for the db file, but the handles
/// could still be closed).
fn tmpDbPath(allocator: mem.Allocator) ![:0]const u8 {
    const tmp_dir = testing.tmpDir(.{});

    // Build the plain path first, then duplicate with a null terminator
    // since the sqlite C API needs a C string.
    const path = try std.fs.path.join(allocator, &[_][]const u8{
        "zig-cache",
        "tmp",
        &tmp_dir.sub_path,
        "zig-sqlite.db",
    });
    defer allocator.free(path);

    return allocator.dupeZ(u8, path);
}
|
||||
|
||||
/// Picks the database mode for tests from build options: in-memory when
/// `in_memory` is set, an explicit file when `dbfile` is given, otherwise a
/// fresh file in a temporary directory.
fn dbMode(allocator: mem.Allocator) Db.Mode {
    return if (build_options.in_memory) blk: {
        break :blk .{ .Memory = {} };
    } else blk: {
        if (build_options.dbfile) |dbfile| {
            // Allocation failure is unrecoverable in this test helper.
            return .{ .File = allocator.dupeZ(u8, dbfile) catch unreachable };
        }

        const path = tmpDbPath(allocator) catch unreachable;

        // Start from a clean slate; ignore failure (the file may simply not exist yet).
        std.fs.cwd().deleteFile(path) catch {};
        break :blk .{ .File = path };
    };
}
|
254
src/.deps/zig-sqlite/tools/preprocess_files.zig
Normal file
254
src/.deps/zig-sqlite/tools/preprocess_files.zig
Normal file
|
@ -0,0 +1,254 @@
|
|||
const std = @import("std");
|
||||
const debug = std.debug;
|
||||
const fmt = std.fmt;
|
||||
const heap = std.heap;
|
||||
const mem = std.mem;
|
||||
|
||||
// This tool is used to preprocess the sqlite3 headers to make them usable to build loadable extensions.
|
||||
//
|
||||
// Due to limitations of `zig translate-c` (used by @cImport) the code produced by @cImport'ing the sqlite3ext.h header is unusable.
|
||||
// The sqlite3ext.h header redefines the SQLite API like this:
|
||||
//
|
||||
// #define sqlite3_open_v2 sqlite3_api->open_v2
|
||||
//
|
||||
// This is not supported by `zig translate-c`, if there's already a definition for a function the aliasing macros won't do anything:
|
||||
// translate-c keeps generating the code for the function defined in sqlite3.h
|
||||
//
|
||||
// Even if there's no definition already (we could for example remove the definition manually from the sqlite3.h file),
|
||||
// the code generated fails to compile because it references the variable sqlite3_api which is not defined
|
||||
//
|
||||
// And even if the sqlite3_api is defined before, the generated code fails to compile because the functions are defined as consts and
|
||||
// can only reference comptime stuff, however sqlite3_api is a runtime variable.
|
||||
//
|
||||
// The only viable option is to completely remove the original function definitions and redefine all functions in Zig which forward
|
||||
// calls to the sqlite3_api object.
|
||||
//
|
||||
// This works but it requires fairly extensive modifications of both sqlite3.h and sqlite3ext.h which is time consuming to do manually;
|
||||
// this tool is intended to automate all these modifications.
|
||||
|
||||
/// Reads the entire file at `path` into memory.
///
/// Caller owns the returned buffer. Files larger than 1 MiB are rejected
/// (readAllAlloc returns error.StreamTooLong).
///
/// Fix: the original stored the un-`try`'d error union in a local and returned
/// it, relying on implicit error-union coercion; propagate explicitly with
/// `try` instead.
fn readOriginalData(allocator: mem.Allocator, path: []const u8) ![]const u8 {
    var file = try std.fs.cwd().openFile(path, .{});
    defer file.close();

    var reader = file.reader();

    return try reader.readAllAlloc(allocator, 1024 * 1024);
}
|
||||
|
||||
/// Streaming text rewriter: scan `data` once with the skip/consume helpers,
/// record delete/replace ranges along the way, then emit the original data
/// with those ranges applied via dump().
const Processor = struct {
    /// A recorded edit over a [start, end) byte range of `data`.
    const Range = union(enum) {
        delete: struct {
            start: usize,
            end: usize,
        },
        replace: struct {
            start: usize,
            end: usize,
            replacement: []const u8,
        },
    };

    allocator: mem.Allocator,

    // Input buffer and the current scan position within it.
    data: []const u8,
    pos: usize,

    // Start position of the range currently being built (set by rangeStart).
    range_start: usize,
    ranges: std.ArrayList(Range),

    fn init(allocator: mem.Allocator, data: []const u8) !Processor {
        return .{
            .allocator = allocator,
            .data = data,
            .pos = 0,
            .range_start = 0,
            // Capacity is reserved up front so rangeDelete/rangeReplace can
            // append without a failure path (appendAssumeCapacity).
            .ranges = try std.ArrayList(Range).initCapacity(allocator, 4096),
        };
    }

    /// Returns the unread remainder of the input ("" at end of input).
    fn readable(self: *Processor) []const u8 {
        if (self.pos >= self.data.len) return "";

        return self.data[self.pos..];
    }

    /// Returns the byte just before the current position, or null at the start.
    fn previousByte(self: *Processor) ?u8 {
        if (self.pos <= 0) return null;
        return self.data[self.pos - 1];
    }

    /// Advances the position to the next occurrence of `needle`.
    /// Returns false (position unchanged) when `needle` does not occur again.
    fn skipUntil(self: *Processor, needle: []const u8) bool {
        const pos = mem.indexOfPos(u8, self.data, self.pos, needle);
        if (pos) |p| {
            self.pos = p;
            return true;
        }
        return false;
    }

    /// Advances past `needle`; asserts the input really starts with it here.
    fn consume(self: *Processor, needle: []const u8) void {
        debug.assert(self.startsWith(needle));

        self.pos += needle.len;
    }

    /// Returns true if the unread input starts with `needle`.
    fn startsWith(self: *Processor, needle: []const u8) bool {
        if (self.pos >= self.data.len) return false;

        const data = self.data[self.pos..];
        return mem.startsWith(u8, data, needle);
    }

    /// Marks the current position as the start of the next recorded range.
    fn rangeStart(self: *Processor) void {
        self.range_start = self.pos;
    }

    /// Records [range_start, pos) for deletion.
    fn rangeDelete(self: *Processor) void {
        self.ranges.appendAssumeCapacity(Range{
            .delete = .{
                .start = self.range_start,
                .end = self.pos,
            },
        });
    }

    /// Records [range_start, pos) for replacement with `replacement`.
    fn rangeReplace(self: *Processor, replacement: []const u8) void {
        self.ranges.appendAssumeCapacity(Range{
            .replace = .{
                .start = self.range_start,
                .end = self.pos,
                .replacement = replacement,
            },
        });
    }

    /// Writes `data` to `writer` with all recorded ranges applied.
    /// Relies on the ranges being in ascending, non-overlapping order, which
    /// the single forward scan guarantees.
    fn dump(self: *Processor, writer: anytype) !void {
        var pos: usize = 0;
        for (self.ranges.items) |range| {
            switch (range) {
                .delete => |dr| {
                    const to_write = self.data[pos..dr.start];
                    try writer.writeAll(to_write);
                    pos = dr.end;
                },
                .replace => |rr| {
                    const to_write = self.data[pos..rr.start];
                    try writer.writeAll(to_write);
                    try writer.writeAll(rr.replacement);
                    pos = rr.end;
                },
            }

            // debug.print("excluded range: start={d} end={d} slice=\"{s}\"\n", .{
            //     range.start,
            //     range.end,
            //     processor.data[range.start..range.end],
            // });
        }

        // Finally append the remaining data in the buffer (the last range will probably not be the end of the file)
        if (pos < self.data.len) {
            const remaining_data = self.data[pos..];
            try writer.writeAll(remaining_data);
        }
    }
};
|
||||
|
||||
/// Produces c/loadable-ext-sqlite3.h from c/sqlite3.h by deleting every
/// SQLITE_API function declaration (SQLITE_EXTERN data declarations are kept).
/// All intermediate allocations live in an arena freed on return.
fn preprocessSqlite3HeaderFile(gpa: mem.Allocator) !void {
    var arena = heap.ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = arena.allocator();

    //

    var data = try readOriginalData(allocator, "c/sqlite3.h");

    var processor = try Processor.init(allocator, data);

    while (true) {
        // Every function definition is declared with SQLITE_API.
        // Stop the loop if there's none in the remaining data.
        if (!processor.skipUntil("SQLITE_API ")) break;

        // If the byte just before is not a LF it's not a function definition.
        // There are a couple instances where SQLITE_API appears in a comment.
        const previous_byte = processor.previousByte() orelse 0;
        if (previous_byte != '\n') {
            processor.consume("SQLITE_API ");
            continue;
        }

        // Now we assume we're at the start of a function definition.
        //
        // We keep track of every function definition by marking its start and end position in the data.

        processor.rangeStart();

        processor.consume("SQLITE_API ");
        if (processor.startsWith("SQLITE_EXTERN ")) {
            // This is not a function definition, ignore it.
            // try processor.unmark();
            continue;
        }

        // A declaration ends at ");\n"; delete everything from SQLITE_API
        // through that terminator.
        _ = processor.skipUntil(");\n");
        processor.consume(");\n");

        processor.rangeDelete();
    }

    // Write the result to the file
    var output_file = try std.fs.cwd().createFile("./c/loadable-ext-sqlite3.h", .{ .mode = 0o0644 });
    defer output_file.close();

    try processor.dump(output_file.writer());
}
|
||||
|
||||
/// Produces c/loadable-ext-sqlite3ext.h from c/sqlite3ext.h: points its
/// include at the preprocessed header and strips every `#define sqlite3_...`
/// redirection macro. All intermediate allocations live in an arena.
fn preprocessSqlite3ExtHeaderFile(gpa: mem.Allocator) !void {
    var arena = heap.ArenaAllocator.init(gpa);
    defer arena.deinit();
    const allocator = arena.allocator();

    //

    var data = try readOriginalData(allocator, "c/sqlite3ext.h");

    var processor = try Processor.init(allocator, data);

    // Replace the include line

    debug.assert(processor.skipUntil("#include \"sqlite3.h\""));

    processor.rangeStart();
    processor.consume("#include \"sqlite3.h\"");
    processor.rangeReplace("#include \"loadable-ext-sqlite3.h\"");

    // Delete all #define macros

    while (true) {
        if (!processor.skipUntil("#define sqlite3_")) break;

        processor.rangeStart();

        // Each macro occupies a single line; delete through the newline.
        processor.consume("#define sqlite3_");
        _ = processor.skipUntil("\n");
        processor.consume("\n");

        processor.rangeDelete();
    }

    // Write the result to the file
    var output_file = try std.fs.cwd().createFile("./c/loadable-ext-sqlite3ext.h", .{ .mode = 0o0644 });
    defer output_file.close();

    try processor.dump(output_file.writer());
}
|
||||
|
||||
/// Entry point: regenerates both loadable-extension headers.
pub fn main() !void {
    var gpa = heap.GeneralPurposeAllocator(.{}){};
    // Fail loudly if the preprocessing leaked anything.
    defer if (gpa.deinit() == .leak) debug.panic("leaks detected\n", .{});

    try preprocessSqlite3HeaderFile(gpa.allocator());
    try preprocessSqlite3ExtHeaderFile(gpa.allocator());
}
|
1295
src/.deps/zig-sqlite/vtab.zig
Normal file
1295
src/.deps/zig-sqlite/vtab.zig
Normal file
File diff suppressed because it is too large
Load Diff
10
src/.deps/zig-sqlite/zig.mod
Normal file
10
src/.deps/zig-sqlite/zig.mod
Normal file
|
@ -0,0 +1,10 @@
|
|||
id: nj8usqhaks6kkewaj3pbp0arfh4281me25bl7tf9das1vbqv
|
||||
name: sqlite
|
||||
main: sqlite.zig
|
||||
license: MIT
|
||||
description: Thin SQLite wrapper
|
||||
c_include_dirs:
|
||||
- c
|
||||
c_source_files:
|
||||
- c/sqlite3.c
|
||||
dependencies:
|
118
src/db/db.zig
Normal file
118
src/db/db.zig
Normal file
|
@ -0,0 +1,118 @@
|
|||
const sqlite = @import("sqlite");
|
||||
const std = @import("std");
|
||||
const models = @import("models.zig");
|
||||
const utils = @import("../utils.zig");
|
||||
|
||||
const Allocator = std.mem.Allocator;
|
||||
const print = std.debug.print;
|
||||
|
||||
/// Thin wrapper around a zig-sqlite connection with comptime query builders
/// from `models`. Not a connection pool: one handle, MultiThread mode.
pub const Db = struct {
    allocator: Allocator,
    _mode: ?sqlite.Db.Mode,
    _sql_db: sqlite.Db,

    /// Open (creating if necessary) the SQLite database.
    /// A null `mode` falls back to the default on-disk database file.
    pub fn init(allocator: Allocator, mode: ?sqlite.Db.Mode) !Db {
        var sqlDb = try sqlite.Db.init(.{
            .mode = if (mode != null) mode.? else sqlite.Db.Mode{ .File = "/home/nate/Source/rluv/zerver/data.db" },
            .open_flags = .{
                .write = true,
                .create = true,
            },
            .threading_mode = .MultiThread,
        });
        return Db{ .allocator = allocator, ._mode = mode, ._sql_db = sqlDb };
    }

    /// Close the underlying SQLite handle.
    pub fn deinit(self: *Db) void {
        self._sql_db.deinit();
    }

    /// SELECT every row of `Type`'s table matching `whereClause`.
    /// When `limit` is non-null a LIMIT clause is appended at comptime
    /// (previously the parameter was accepted but silently ignored).
    /// Caller owns the returned slice, allocated with `allocator`.
    pub fn selectAllWhere(
        self: *Db,
        comptime Type: type,
        allocator: Allocator,
        comptime whereClause: []const u8,
        values: anytype,
        comptime limit: ?u32,
    ) !?[]Type {
        var res_array: std.ArrayList(Type) = std.ArrayList(Type).init(allocator);

        const limit_clause = comptime if (limit) |n| std.fmt.comptimePrint(" LIMIT {d}", .{n}) else "";
        const query = "SELECT * FROM " ++ models.getTypeTableName(Type) ++ " " ++ whereClause ++ limit_clause ++ ";";
        var stmt = try self._sql_db.prepare(query);
        defer stmt.deinit();

        var iter = try stmt.iteratorAlloc(Type, allocator, values);
        while (try iter.nextAlloc(allocator, .{})) |row| {
            try res_array.append(row);
        }

        return try res_array.toOwnedSlice();
    }

    /// SELECT a single row by primary key; null when not found.
    pub fn selectOneById(self: *Db, comptime Type: type, allocator: Allocator, id: u32) !?Type {
        const row = try self._sql_db.oneAlloc(Type, allocator, models.createSelectOnIdQuery(Type), .{}, .{ .id = id });
        return row;
    }

    /// SELECT a single row with an arbitrary comptime query; null when not found.
    pub fn selectOne(self: *Db, comptime Type: type, allocator: Allocator, comptime query: []const u8, values: anytype) !?Type {
        const row = try self._sql_db.oneAlloc(Type, allocator, query, .{}, values);
        return row;
    }

    /// UPDATE a row in place; `values` must include `id`.
    pub fn updateById(self: *Db, comptime Type: type, values: anytype) !void {
        // TODO check there is an ID field
        // The update query ends with `WHERE id = ?`, so the id is appended
        // once more (as `id2`) to bind that trailing placeholder.
        const data = utils.structConcatFields(values, .{ .id2 = values.id });
        self._sql_db.exec(models.createUpdateQuery(Type), .{}, data) catch |err| {
            std.debug.print("Encountered error while updating data:\n{any}\n\tQuery:{s}\n{any}\n", .{ values, models.createUpdateQuery(Type), err });
            return err;
        };
    }

    /// INSERT a new row; `values` must not carry an id (column autoincrements).
    pub fn insert(self: *Db, comptime Type: type, values: anytype) !void {
        // TODO check there is an ID field
        self._sql_db.exec(models.createInsertQuery(Type), .{}, values) catch |err| {
            std.debug.print("Encountered error while inserting data:\n{any}\n\tQuery:{s}\n{any}\n", .{ values, models.createInsertQuery(Type), err });
            return err;
        };
    }

    /// DELETE the row with the given primary key.
    pub fn deleteById(self: *Db, comptime Type: type, id: u32) !void {
        // TODO check there is an ID field
        self._sql_db.exec(models.createDeleteOnIdQuery(Type), .{}, .{ .id = id }) catch |err| {
            // Bug fix: this log previously printed the INSERT query, making
            // delete-failure logs misleading; print the DELETE query used.
            std.debug.print("Encountered error while deleting id {}\n\tQuery:{s}\n{any}\n", .{ id, models.createDeleteOnIdQuery(Type), err });
            return err;
        };
    }

    /// Enable foreign keys and create any missing model tables.
    pub fn migrateDb(self: *Db) !void {
        print("Making migration in DB...\n", .{});
        _ = try self._sql_db.pragma(void, .{}, "foreign_keys", "1");
        inline for (models.ModelTypes) |model_type| {
            self._sql_db.exec(models.createTableMigrationQuery(model_type), .{}, .{}) catch |err| {
                std.debug.print("Encountered error while executing migration\n\tQuery:{s}\n{any}\n", .{ models.createTableMigrationQuery(model_type), err });
                return err;
            };
        }
    }

    /// Drop every model table, then re-run migrations. Destroys all data.
    pub fn wipeAndMigrateDb(self: *Db) !void {
        print("Wiping DB data and schema...\n", .{});
        inline for (models.ModelTypes) |model_type| {
            self._sql_db.exec(models.createTableDeleteQuery(model_type), .{}, .{}) catch |err| {
                std.debug.print("Encountered error while executing table deletion\n\tQuery:{s}\n{any}\n", .{ models.createTableDeleteQuery(model_type), err });
                return err;
            };
        }
        try self.migrateDb();
    }

    /// Truncate long queries for log output (comptime strings only).
    fn formatQuery(
        query: []const u8,
    ) []const u8 {
        const max_chars = 260;
        return if (query.len > max_chars) query[0..max_chars] ++ "..." else query;
    }
};
|
183
src/db/models.zig
Normal file
183
src/db/models.zig
Normal file
|
@ -0,0 +1,183 @@
|
|||
const std = @import("std");
|
||||
const utils = @import("../utils.zig");
|
||||
|
||||
/// A single money movement belonging to a budget.
pub const Transaction = struct {
    id: u32,
    amount: f64,
    type: []const u8,
    memo: ?[]const u8,
    budget_id: u32,
    added_by_user_id: u32,
    budget_category_id: ?u32,
    date: u64,
    created_at: u64,
    updated_at: u64,
    hide: u8,
};

/// A named spending bucket inside a budget.
pub const BudgetCategory = struct {
    id: u32,
    budget_id: u32,
    amount: f64,
    name: []const u8,
    color: []const u8,
    created_at: u64,
    updated_at: u64,
    hide: u8,
};

/// Top-level budget container.
pub const Budget = struct {
    id: u32,
    name: []const u8,
    created_at: u64,
    updated_at: u64,
    hide: u8,
};

/// An account holder belonging to a family and a budget.
pub const User = struct {
    id: u32,
    name: []const u8,
    family_id: u32,
    budget_id: u32,
    created_at: u64,
    updated_at: u64,
    last_activity_at: u64,
    hide: u8,
};

/// A household grouping; shares one budget.
pub const Family = struct {
    id: u32,
    budget_id: u32,
    hide: u8,
    created_at: u64,
    updated_at: u64,
};

/// Every persisted model; migrations iterate this list.
pub const ModelTypes = [5]type{ Transaction, BudgetCategory, Budget, User, Family };

/// Functions for creating SQLite queries for any models above
/// Build `SELECT * FROM <table> WHERE id = ?;` at comptime.
pub inline fn createSelectOnIdQuery(comptime Type: type) []const u8 {
    return "SELECT * FROM " ++ getTypeTableName(Type) ++ " WHERE id = ?;";
}

/// Build `SELECT * FROM <table> WHERE <field> <comparator> ?;`.
/// Exactly one of `structField` / `fieldName` must be provided.
pub inline fn createSelectOnFieldQuery(
    comptime Type: type,
    comptime structField: ?std.builtin.Type.StructField,
    comptime fieldName: ?[]const u8,
    comptime comparator: []const u8,
) ![]const u8 {
    comptime {
        try std.testing.expect(fieldName == null and structField != null or fieldName != null and structField == null);
        var field: []const u8 = undefined;
        if (structField != null) {
            // Bug fix: previously assigned the whole StructField struct to a
            // []const u8; the query needs the field's *name*.
            field = structField.?.name;
        } else {
            field = fieldName.?;
        }
        var query = "SELECT * FROM " ++ getTypeTableName(Type) ++ " WHERE " ++ field ++ " " ++ comparator ++ " ?;";
        return query;
    }
}

/// Build `DELETE from <table> WHERE id = ?;` at comptime.
pub inline fn createDeleteOnIdQuery(comptime Type: type) []const u8 {
    return "DELETE from " ++ getTypeTableName(Type) ++ " WHERE id = ?;";
}

/// Build an INSERT over every column except the autoincrement `id`.
pub inline fn createInsertQuery(comptime Type: type) []const u8 {
    comptime {
        // The loop below skips field 0 on the assumption that it is `id`;
        // enforce that assumption instead of silently mis-building the query.
        if (!std.mem.eql(u8, @typeInfo(Type).Struct.fields[0].name, "id"))
            @compileError("createInsertQuery requires 'id' to be the first field of " ++ @typeName(Type));
        var query: []const u8 = "INSERT INTO " ++ getTypeTableName(Type) ++ " (";
        var qs: []const u8 = "?";
        inline for (@typeInfo(Type).Struct.fields, 0..) |field, i| {
            if (i > 1) {
                query = query ++ ", ";
                qs = qs ++ ", ?";
            }
            if (i != 0) {
                query = query ++ field.name;
            }
        }
        query = query ++ ") VALUES (" ++ qs ++ ");";
        return query;
    }
}

/// Build an UPDATE over every column (including id) with a trailing
/// `WHERE id = ?`; callers bind the id twice (see Db.updateById).
pub inline fn createUpdateQuery(comptime Type: type) []const u8 {
    comptime {
        var query: []const u8 = "UPDATE " ++ getTypeTableName(Type) ++ " SET ";
        inline for (@typeInfo(Type).Struct.fields, 0..) |field, i| {
            if (i != 0) {
                query = query ++ " = ?, ";
            }
            query = query ++ field.name;
        }
        query = query ++ " = ? WHERE id = ?;";
        return query;
    }
}

/// Build `DROP TABLE IF EXISTS <table>;` at comptime.
pub inline fn createTableDeleteQuery(comptime Type: type) []const u8 {
    return "DROP TABLE IF EXISTS " ++ getTypeTableName(Type) ++ ";";
}

/// Build a STRICT CREATE TABLE statement with one column per struct field.
pub inline fn createTableMigrationQuery(comptime Type: type) []const u8 {
    comptime {
        var query: []const u8 = "CREATE TABLE IF NOT EXISTS " ++ getTypeTableName(Type) ++ " (\n";

        inline for (@typeInfo(Type).Struct.fields, 0..) |field, i| {
            if (i != 0) {
                query = query ++ ",\n";
            }
            query = query ++ "\t" ++ field.name ++ " " ++ getSQLiteColumnMigrateText(field);
        }
        query = query ++ "\n) STRICT;";
        return query;
    }
}

/// Map a Zig struct field to its SQLite column type; optionals drop NOT NULL,
/// and the `id` field always becomes the autoincrement primary key.
inline fn getSQLiteColumnMigrateText(comptime struct_field: std.builtin.Type.StructField) []const u8 {
    comptime {
        if (std.mem.eql(u8, struct_field.name, "id")) return "INTEGER PRIMARY KEY AUTOINCREMENT";
        const val = switch (@typeInfo(struct_field.type)) {
            .Int => "INTEGER NOT NULL",
            .Float => "REAL NOT NULL",
            .Optional => switch (@typeInfo(@typeInfo(struct_field.type).Optional.child)) {
                .Int => "INTEGER",
                .Float => "REAL",
                .Pointer => "TEXT",
                .Array => "TEXT",
                else => unreachable,
            },
            .Array => "TEXT NOT NULL",
            .Pointer => "TEXT NOT NULL",
            else => unreachable,
        };
        return val;
    }
}

/// Resolve the table name for a model type; compile-time checked.
pub inline fn getTypeTableName(comptime Type: type) []const u8 {
    comptime {
        return switch (Type) {
            User => "users",
            Transaction => "transactions",
            Budget => "budgets",
            BudgetCategory => "budget_categories",
            Family => "families",
            else => unreachable,
        };
    }
}

test "Comptime Query Builder" {
    var query = createSelectOnIdQuery(Transaction);
    std.debug.print("\nGot SELECT query for Transaction\n\t{s}\n", .{query});
    query = createInsertQuery(Budget);
    std.debug.print("Got INSERT query for Budget\n\t{s}\n", .{query});
    query = createUpdateQuery(BudgetCategory);
    std.debug.print("Got UPDATE query for BudgetCategory\n\t{s}\n", .{query});

    query = createTableMigrationQuery(User);
    std.debug.print("Migration for user table:\n{s}\n", .{query});
}
|
93
src/http_handler.zig
Normal file
93
src/http_handler.zig
Normal file
|
@ -0,0 +1,93 @@
|
|||
const std = @import("std");
|
||||
const httpz = @import(".deps/http.zig/src/httpz.zig");
|
||||
const models = @import("db/models.zig");
|
||||
const ztime = @import(".deps/time.zig");
|
||||
|
||||
const utils = @import("utils.zig");
|
||||
|
||||
const budget = @import("routes/budget.zig");
|
||||
const user = @import("routes/user.zig");
|
||||
const trans = @import("routes/transactions.zig");
|
||||
const dash = @import("routes/dashboard.zig");
|
||||
|
||||
const Db = @import("db/db.zig").Db;
|
||||
|
||||
/// Process-wide database handle, initialized by `startHttpServer`.
var db: Db = undefined;

/// Accessor for the shared database handle.
/// Only valid after `startHttpServer` has initialized it.
pub fn getDb() *Db {
    return &db;
}

/// Initialize the shared Db, register all routes, then block serving HTTP.
pub fn startHttpServer() !void {
    // Server-lifetime allocator; lives until the process exits.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    db = try Db.init(allocator, null);
    defer db.deinit();

    // Single source of truth for the listen port (it was previously
    // duplicated as a literal in the config and in the log line below).
    const port: u16 = 8081;
    var server = try httpz.Server().init(allocator, .{ .port = port });

    // overwrite the default notFound handler
    server.notFound(notFound);

    // overwrite the default error handler
    server.errorHandler(errorHandler);

    var router = server.router();

    router.get("/user/:id", user.getUser);
    router.put("/user", user.putUser);
    router.delete("/user/:id", user.deleteUser);

    router.get("/budget/:id", budget.getBudget);
    router.put("/budget", budget.putBudget);
    router.post("/budget", budget.postBudget);

    router.put("/budget_category", budget.putBudgetCategory);
    router.post("/budget_category", budget.postBudgetCategory);

    router.get("/transactions/:budget_id", trans.getTransactions);
    router.post("/transactions", trans.postTransaction);
    router.put("/transactions", trans.putTransaction);

    router.get("/dashboard/:family_id", dash.getDashboard);

    std.debug.print("Starting http server listening on port {}\n", .{port});
    // start the server in the current thread, blocking.
    try server.listen();
}
|
||||
|
||||
/// Fallback handler for unmatched routes.
fn notFound(_: *httpz.Request, res: *httpz.Response) !void {
    res.status = 404;
    // The body must stay valid beyond this handler; a static literal is
    // safe. Use res.arena if the body ever needs dynamic allocation.
    res.body = "Not Found";
}

/// Last-resort handler for errors that escaped a route handler.
/// Returns `void`, not `!void` — it must not fail itself.
fn errorHandler(req: *httpz.Request, res: *httpz.Response, err: anyerror) void {
    res.status = 500;
    res.body = "Internal Server Error";
    std.log.warn("httpz: unhandled exception for request: {s}\nErr: {}", .{ req.url.raw, err });
}
|
||||
|
||||
/// Send a JSON error response `{ success: false, message: ... }` with the
/// given status code, validated at compile time to be a failure code.
pub fn returnError(message: ?[]const u8, comptime statusCode: u16, res: *httpz.Response) void {
    comptime {
        if (statusCode < 300 or statusCode > 500) {
            @compileError("Failed responses must have status codes between 300 and 500");
        }
    }
    res.status = statusCode;
    res.json(.{ .success = false, .message = message }, .{}) catch |err| {
        std.log.warn("Couldnt create error body: {}", .{err});
        // Bug fix: the fallback JSON was missing its closing brace, so
        // clients received invalid JSON on the serialization-failure path.
        res.body = "{ \"success\": false }";
    };
}
|
||||
|
||||
/// Send a 200 JSON response: `data` with `success = true` merged in.
pub fn returnData(data: anytype, res: *httpz.Response) !void {
    res.status = 200;
    const payload = utils.structConcatFields(data, .{ .success = true });
    try res.json(payload, .{});
}
|
14
src/main.zig
Normal file
14
src/main.zig
Normal file
|
@ -0,0 +1,14 @@
|
|||
const std = @import("std");
|
||||
// Define a struct for "global" data passed into your websocket handler
|
||||
const Db = @import("./db/db.zig").Db;
|
||||
const http = @import("./http_handler.zig");
|
||||
|
||||
/// Process entry point: open the database, then serve HTTP (blocking).
pub fn main() !void {
    std.debug.print("\nStarting Server...\n", .{});
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    // NOTE(review): startHttpServer() opens its own Db handle as well; this
    // local handle looks redundant except for the (commented) migration
    // call below — confirm before removing it.
    var db = try Db.init(allocator, null);
    defer db.deinit();
    // try db.wipeAndMigrateDb();
    try http.startHttpServer();
}
|
0
src/routes/auth.zig
Normal file
0
src/routes/auth.zig
Normal file
214
src/routes/budget.zig
Normal file
214
src/routes/budget.zig
Normal file
|
@ -0,0 +1,214 @@
|
|||
const std = @import("std");
|
||||
const httpz = @import("../.deps/http.zig/src/httpz.zig");
|
||||
const models = @import("../db/models.zig");
|
||||
const ztime = @import("../.deps/time.zig");
|
||||
const utils = @import("../utils.zig");
|
||||
|
||||
const handler = @import("../http_handler.zig");
|
||||
|
||||
/// GET /budget/:id — fetch a single budget by id.
pub fn getBudget(req: *httpz.Request, res: *httpz.Response) !void {
    const db = handler.getDb();

    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    const id_str = req.param("id");
    if (id_str == null) {
        res.status = 400;
        res.body = "Bad Request: No Id";
        return;
    }
    const id = std.fmt.parseInt(u32, id_str.?, 0) catch {
        // Bug fix: a malformed id is a client error (400), not 401 Unauthorized.
        res.status = 400;
        res.body = "Bad Request: Bad Id";
        return;
    };

    const budget = try db.selectOneById(models.Budget, allocator, id);

    if (budget == null) {
        res.status = 404;
        res.body = "Budget not found";
        return;
    }

    try res.json(budget.?, .{});
}
|
||||
|
||||
/// Request shape for budget create; mirrors models.Budget with
/// server-managed fields optional (checked at comptime in postBudget).
const BudgetPostReq = struct {
    id: ?u32,
    name: []const u8,
    created_at: ?u64,
    updated_at: ?u64,
    hide: u8,
};

/// PUT /budget — update an existing budget and echo the stored row.
pub fn putBudget(req: *httpz.Request, res: *httpz.Response) !void {
    var db = handler.getDb();
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    const body_data = req.json(models.Budget) catch |err| {
        std.debug.print("Malformed body: {any}\n", .{err});
        handler.returnError("Bad Request: Malformed Body", 400, res);
        return;
    };
    if (body_data == null) {
        handler.returnError("Bad Request: No Data", 400, res);
        return;
    }
    var body = body_data.?;

    // Stamp the modification time, then persist.
    const now = @intCast(u64, std.time.milliTimestamp());
    body.updated_at = now;
    try db.updateById(models.Budget, body);

    // Bug fix: the re-select query was built for models.Transaction, so it
    // read the wrong table while decoding a Budget; select from budgets.
    const query = models.createSelectOnIdQuery(models.Budget);
    const updated_budget = try db.selectOne(models.Budget, allocator, query, .{ .id = body.id });
    if (updated_budget) |budget| {
        try handler.returnData(budget, res);
    } else {
        handler.returnError("Internal Server Error", 500, res);
    }
    return;
}

/// POST /budget — create a new budget and echo the stored row.
pub fn postBudget(req: *httpz.Request, res: *httpz.Response) !void {
    comptime {
        const putReqLen = @typeInfo(BudgetPostReq).Struct.fields.len;
        const budgetLen = @typeInfo(models.Budget).Struct.fields.len;
        if (putReqLen != budgetLen) {
            @compileError(std.fmt.comptimePrint("BudgetPutReq does not equal Budget model struct, fields inconsistent", .{}));
        }
    }

    var db = handler.getDb();
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    const body_data = req.json(BudgetPostReq) catch |err| {
        std.debug.print("Malformed body: {any}\n", .{err});
        handler.returnError("Bad request: Malformed Body", 400, res);
        return;
    };
    if (body_data == null) {
        handler.returnError("Bad request: No Data", 400, res);
        return;
    }
    var body = body_data.?;

    // Ids are assigned by the database; reject client-supplied ones.
    if (body.id != null) {
        handler.returnError("Bad Request: ID", 400, res);
        // Bug fix: without this return the handler fell through and
        // inserted the row anyway after reporting an error.
        return;
    }
    const now = @intCast(u64, std.time.milliTimestamp());
    body.created_at = now;
    body.updated_at = now;

    // Strip the (null) id field before insert; the column autoincrements.
    try db.insert(models.Budget, utils.removeStructFields(body, &[_]u8{0}));

    // Re-read the freshly inserted row, keyed by its creation timestamp.
    const query = try models.createSelectOnFieldQuery(models.Budget, null, "created_at", "=");
    const updated_budget = try db.selectOne(models.Budget, allocator, query, .{ .created_at = body.created_at });
    if (updated_budget) |budget| {
        try handler.returnData(budget, res);
    } else {
        handler.returnError("Internal Server Error", 500, res);
    }
    return;
}
|
||||
|
||||
/// Request shape for budget-category create; mirrors models.BudgetCategory
/// with server-managed fields optional (checked at comptime below).
const BudgetCatPostReq = struct {
    id: ?u32,
    budget_id: u32,
    amount: f64,
    name: []const u8,
    color: []const u8,
    created_at: ?u64,
    updated_at: ?u64,
    hide: u8,
};

/// POST /budget_category — create a category and echo the stored row.
pub fn postBudgetCategory(req: *httpz.Request, res: *httpz.Response) !void {
    comptime {
        const putReqLen = @typeInfo(BudgetCatPostReq).Struct.fields.len;
        const budgetLen = @typeInfo(models.BudgetCategory).Struct.fields.len;
        if (putReqLen != budgetLen) {
            @compileError(std.fmt.comptimePrint("BudgetCatPutReq does not equal Budget model struct, fields inconsistent", .{}));
        }
    }

    var db = handler.getDb();
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    const parsed = req.json(BudgetCatPostReq) catch |err| {
        std.debug.print("Malformed body: {any}\n", .{err});

        handler.returnError("Bad request: Malformed Body", 400, res);
        return;
    };
    var category = parsed orelse {
        handler.returnError("Bad request: No Data", 400, res);
        return;
    };

    // Ids are assigned by the database; reject client-supplied ones.
    if (category.id != null) {
        handler.returnError("Bad request: ID", 400, res);
        return;
    }
    const now = @intCast(u64, std.time.milliTimestamp());
    category.created_at = now;
    category.updated_at = now;

    // Strip the (null) id field before insert; the column autoincrements.
    try db.insert(models.BudgetCategory, utils.removeStructFields(category, &[_]u8{0}));

    // Re-read the row we just wrote, keyed by its creation timestamp.
    const query = try models.createSelectOnFieldQuery(models.BudgetCategory, null, "created_at", "=");
    const stored = try db.selectOne(models.BudgetCategory, allocator, query, .{ .created_at = category.created_at });
    if (stored) |budget| {
        try handler.returnData(budget, res);
    } else {
        std.debug.print("Could not find inserted budget", .{});
        handler.returnError("Internal Server Error", 500, res);
    }
    return;
}

/// PUT /budget_category — update an existing category and echo the row.
pub fn putBudgetCategory(req: *httpz.Request, res: *httpz.Response) !void {
    var db = handler.getDb();
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    const parsed = req.json(models.BudgetCategory) catch |err| {
        std.debug.print("Malformed body: {any}\n", .{err});
        handler.returnError("Bad request: Malformed Body", 400, res);
        return;
    };
    var budget_category = parsed orelse {
        handler.returnError("Bad request: No Data", 400, res);
        return;
    };

    // Stamp the modification time, then persist.
    const now = @intCast(u64, std.time.milliTimestamp());
    budget_category.updated_at = now;
    try db.updateById(models.BudgetCategory, budget_category);

    const query = models.createSelectOnIdQuery(models.BudgetCategory);
    const stored = try db.selectOne(models.BudgetCategory, allocator, query, .{ .id = budget_category.id });
    if (stored) |budget| {
        try handler.returnData(budget, res);
    } else {
        std.debug.print("Could not find inserted budget", .{});
        handler.returnError("Internal Server Error", 500, res);
    }
    return;
}
|
62
src/routes/dashboard.zig
Normal file
62
src/routes/dashboard.zig
Normal file
|
@ -0,0 +1,62 @@
|
|||
const std = @import("std");
|
||||
const httpz = @import("../.deps/http.zig/src/httpz.zig");
|
||||
const models = @import("../db/models.zig");
|
||||
const ztime = @import("../.deps/time.zig");
|
||||
const utils = @import("../utils.zig");
|
||||
const trans = @import("transactions.zig");
|
||||
const handler = @import("../http_handler.zig");
|
||||
|
||||
/// GET /dashboard/:family_id — aggregate family, budget, categories and
/// the current month's transactions into one JSON payload.
pub fn getDashboard(req: *httpz.Request, res: *httpz.Response) !void {
    const db = handler.getDb();

    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    const family_id_str = req.param("family_id") orelse {
        res.status = 400;
        res.body = "Bad Request: No FamilyId";
        return;
    };
    const family_id = std.fmt.parseInt(u32, family_id_str, 0) catch {
        res.status = 400;
        res.body = "Bad Request: Bad FamilyId";
        return;
    };

    const family = db.selectOneById(models.Family, allocator, family_id) catch |err| {
        // A SQLite-level failure here is reported as "no such family".
        if (err == error.SQLiteError) {
            res.status = 404;
            res.body = "Family Not Found";
            return;
        }
        std.debug.print("Error while getting family: {}\n", .{err});
        res.status = 500;
        res.body = "Internal Server Error";
        return;
    };
    if (family == null) {
        res.status = 404;
        res.body = "Family Not Found";
        return;
    }

    const transactions = try trans.fetchTransFromDb(allocator, family.?.budget_id);

    const budget = try db.selectOneById(models.Budget, allocator, family.?.budget_id);
    var budget_categories: ?[]models.BudgetCategory = null;
    if (budget != null) {
        budget_categories = try db.selectAllWhere(models.BudgetCategory, allocator, "WHERE budget_id = ? AND hide = ?", .{ .budget_id = budget.?.id, .hide = 0 }, null);
    }

    // Normalize "no categories" to an empty slice for the JSON payload.
    if (budget_categories == null) {
        budget_categories = &[_]models.BudgetCategory{};
    }
    try res.json(.{
        .family = family.?,
        .budget = budget,
        .budget_categories = budget_categories,
        .transactions = transactions,
    }, .{});
}
|
146
src/routes/transactions.zig
Normal file
146
src/routes/transactions.zig
Normal file
|
@ -0,0 +1,146 @@
|
|||
const std = @import("std");
|
||||
const httpz = @import("../.deps/http.zig/src/httpz.zig");
|
||||
const models = @import("../db/models.zig");
|
||||
const ztime = @import("../.deps/time.zig");
|
||||
const utils = @import("../utils.zig");
|
||||
|
||||
const handler = @import("../http_handler.zig");
|
||||
|
||||
/// Fetch all visible transactions for `budget_id` dated after the start of
/// the current month. Caller owns the returned slice.
pub fn fetchTransFromDb(allocator: std.mem.Allocator, budget_id: u32) !?[]models.Transaction {
    var db = handler.getDb();
    const now = ztime.DateTime.now();
    const beginningOfMonth = ztime.DateTime.init(now.years, now.months, 0, 0, 0, 0);

    comptime {
        // Guard the positional assumption the query below makes about the
        // model. Bug fix: the previous code did `return error{...};`, which
        // yields an error-set *type*, not an error value, and cannot satisfy
        // this function's return type; fail compilation explicitly instead.
        if (!std.mem.eql(u8, @typeInfo(models.Transaction).Struct.fields[7].name, "date")) {
            @compileError("models.Transaction field 7 must be named 'date'");
        }
    }
    const transactions = try db.selectAllWhere(
        models.Transaction,
        allocator,
        "WHERE budget_id = ? AND date > ? AND hide = ?",
        .{ .budget_id = budget_id, .date = beginningOfMonth.toUnixMilli(), .hide = 0 },
        null,
    );
    return transactions;
}
|
||||
|
||||
/// GET /transactions/:budget_id — list this month's visible transactions.
pub fn getTransactions(req: *httpz.Request, res: *httpz.Response) !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    const budget_id_str = req.param("budget_id") orelse {
        handler.returnError("Bad request", 400, res);
        return;
    };
    const budget_id = std.fmt.parseInt(u32, budget_id_str, 0) catch {
        handler.returnError("Bad request", 400, res);
        return;
    };

    const transactions = try fetchTransFromDb(allocator, budget_id);

    // No rows: respond 200 with an empty body rather than an error.
    const found = transactions orelse {
        res.status = 200;
        res.body = "";
        return;
    };
    try handler.returnData(.{ .transactions = found }, res);
}
|
||||
|
||||
/// PUT /transactions — update an existing transaction and echo the row.
pub fn putTransaction(req: *httpz.Request, res: *httpz.Response) !void {
    var db = handler.getDb();
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    const parsed = req.json(models.Transaction) catch |err| {
        std.debug.print("Malformed body: {any}\n", .{err});
        handler.returnError("Bad request: Malformed Body", 400, res);
        return;
    };
    var transaction = parsed orelse {
        handler.returnError("Bad request: No Data", 400, res);
        return;
    };

    // Stamp the modification time, then persist.
    const now = @intCast(u64, std.time.milliTimestamp());
    transaction.updated_at = now;
    try db.updateById(models.Transaction, transaction);

    const query = models.createSelectOnIdQuery(models.Transaction);
    const stored = try db.selectOne(models.Transaction, allocator, query, .{ .id = transaction.id });
    if (stored) |trans| {
        try handler.returnData(trans, res);
    } else {
        handler.returnError("Internal Server Error", 500, res);
    }
    return;
}

/// Request shape for transaction create; mirrors models.Transaction with
/// server-managed fields optional (checked at comptime in postTransaction).
const TransPostReq = struct {
    id: ?u32,
    amount: f64,
    type: []const u8,
    memo: ?[]const u8,
    budget_id: u32,
    added_by_user_id: u32,
    budget_category_id: ?u32,
    date: u64,
    created_at: ?u64,
    updated_at: ?u64,
    hide: u8,
};

/// POST /transactions — create a transaction and echo the stored row.
pub fn postTransaction(req: *httpz.Request, res: *httpz.Response) !void {
    comptime {
        const putReqLen = @typeInfo(TransPostReq).Struct.fields.len;
        const transLen = @typeInfo(models.Transaction).Struct.fields.len;
        if (putReqLen != transLen) {
            @compileError(std.fmt.comptimePrint("TransactionPutReq does not equal Transaction model struct, fields inconsistent", .{}));
        }
    }
    var db = handler.getDb();
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    const parsed = req.json(TransPostReq) catch |err| {
        std.debug.print("Malformed body: {any}\n", .{err});
        handler.returnError("Bad request: Malformed Body", 400, res);
        return;
    };
    var transaction = parsed orelse {
        handler.returnError("Bad request: No Data", 400, res);
        return;
    };

    // Ids are assigned by the database; reject client-supplied ones.
    if (transaction.id != null) {
        handler.returnError("Bad request: ID", 400, res);
        return;
    }

    const now = @intCast(u64, std.time.milliTimestamp());
    transaction.created_at = now;
    transaction.updated_at = now;

    // Strip the (null) id field before insert; the column autoincrements.
    try db.insert(models.Transaction, utils.removeStructFields(transaction, &[_]u8{0}));

    // Re-read the row we just wrote, keyed by its creation timestamp.
    const query = try models.createSelectOnFieldQuery(models.Transaction, null, "created_at", "=");
    const stored = try db.selectOne(models.Transaction, allocator, query, .{ .created_at = transaction.created_at });
    if (stored) |tran| {
        try handler.returnData(tran, res);
    } else {
        handler.returnError("Internal Server Error", 500, res);
    }
    return;
}
|
149
src/routes/user.zig
Normal file
149
src/routes/user.zig
Normal file
|
@ -0,0 +1,149 @@
|
|||
const std = @import("std");
|
||||
const httpz = @import("../.deps/http.zig/src/httpz.zig");
|
||||
const models = @import("../db/models.zig");
|
||||
const utils = @import("../utils.zig");
|
||||
|
||||
const handler = @import("../http_handler.zig");
|
||||
|
||||
/// GET /user/:id — fetch a single user by id.
pub fn getUser(req: *httpz.Request, res: *httpz.Response) !void {
    const db = handler.getDb();

    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();
    const id_str = req.param("id");
    if (id_str == null) {
        handler.returnError("Bad Request: No Id", 400, res);
        return;
    }
    const id = std.fmt.parseInt(u32, id_str.?, 0) catch {
        // Bug fix: a malformed id is a client error (400), not 401 Unauthorized.
        handler.returnError("Bad Request: Bad Id", 400, res);
        return;
    };

    const user = try db.selectOneById(models.User, allocator, id);

    if (user == null) {
        handler.returnError("Error: User Not Found", 404, res);
        return;
    }

    try handler.returnData(user.?, res);
}
|
||||
|
||||
/// Request-body shape for `postUser`. Mirrors `models.User` field-for-field
/// (a comptime length check in `postUser` enforces this); the optional
/// fields are server-assigned and are expected to be null in incoming JSON.
const UserPostReq = struct {
    id: ?u32, // must be null on POST; the database assigns it
    name: []const u8,
    family_id: u32,
    budget_id: u32,
    created_at: ?u64, // set by the server at insert time
    last_activity_at: ?u64, // set by the server at insert time
    updated_at: ?u64, // set by the server at insert time
    hide: u8, // presumably a 0/1 boolean flag — TODO confirm against schema
};
|
||||
|
||||
/// PUT /user — update an existing user row from a JSON `models.User` body.
///
/// Responds 400 on a malformed/empty body or a missing id, 500 when the row
/// cannot be re-read after the update, otherwise returns the updated row.
pub fn putUser(req: *httpz.Request, res: *httpz.Response) !void {
    const db = handler.getDb();

    // NOTE(review): per-request GPA is never deinit'ed (leak); see getUser.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    const body_data = req.json(models.User) catch |err| {
        std.debug.print("Malformed body: {any}\n", .{err});
        handler.returnError("Bad request: Malformed Body", 400, res);
        return;
    };
    if (body_data == null) {
        handler.returnError("Bad request: No Data", 400, res);
        return;
    }
    var body = body_data.?;

    // An update must target an existing row, so the id is mandatory;
    // previously a null id was passed straight into updateById.
    if (body.id == null) {
        handler.returnError("Bad request: Missing ID", 400, res);
        return;
    }

    // Single-argument @intCast (result type taken from the annotation): the
    // two-argument form was removed in Zig 0.11, which the multi-object
    // `for (x, 0..)` syntax in utils.zig already requires.
    const now: u64 = @intCast(std.time.milliTimestamp());
    body.updated_at = now;
    body.last_activity_at = now;
    try db.updateById(models.User, body);

    // Re-read the row so the response reflects what was actually stored.
    const query = models.createSelectOnIdQuery(models.User);
    const updated_user = try db.selectOne(models.User, allocator, query, .{ .id = body.id });
    if (updated_user) |user| {
        try handler.returnData(user, res);
    } else {
        handler.returnError("Internal Server Error", 500, res);
    }
    return;
}
|
||||
|
||||
/// POST /user — create a new user from a JSON `UserPostReq` body.
///
/// Responds 400 on a malformed/empty body or a client-supplied id, 500 when
/// the freshly inserted row cannot be read back, otherwise returns that row.
pub fn postUser(req: *httpz.Request, res: *httpz.Response) !void {
    // Keep the request mirror struct in lockstep with the model so the
    // field-index-based `removeStructFields` call below stays valid.
    comptime {
        const postReqLen = @typeInfo(UserPostReq).Struct.fields.len;
        const userLen = @typeInfo(models.User).Struct.fields.len;
        if (postReqLen != userLen) {
            // Message previously named the wrong struct ("UserPutReq").
            @compileError(std.fmt.comptimePrint("UserPostReq does not equal User model struct, fields inconsistent", .{}));
        }
    }

    const db = handler.getDb();
    // NOTE(review): per-request GPA is never deinit'ed (leak); see getUser.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    const body_data = req.json(UserPostReq) catch |err| {
        std.debug.print("Malformed body: {any}\n", .{err});
        handler.returnError("Bad request: Malformed Body", 400, res);
        return;
    };
    if (body_data == null) {
        handler.returnError("Bad request: No Data", 400, res);
        return;
    }
    var body = body_data.?;

    // The database assigns the id; a client-supplied one is rejected.
    if (body.id != null) {
        handler.returnError("Bad request: ID", 400, res);
        return;
    }

    // Single-argument @intCast: the two-argument form was removed in Zig
    // 0.11, which the `for (x, 0..)` syntax in utils.zig already requires.
    const now: u64 = @intCast(std.time.milliTimestamp());
    body.created_at = now;
    body.last_activity_at = now;
    body.updated_at = now;

    // Field index 0 is `id`, which the insert must not include.
    try db.insert(models.User, utils.removeStructFields(body, &[_]u8{0}));

    // Re-read the inserted row by its creation timestamp.
    // NOTE(review): created_at equality is not a unique key — two inserts in
    // the same millisecond would collide; an insert-returning-id API would
    // be more robust.
    const query = try models.createSelectOnFieldQuery(models.User, null, "created_at", "=");
    const updated_user = try db.selectOne(models.User, allocator, query, .{ .created_at = body.created_at });
    if (updated_user) |user| {
        try handler.returnData(user, res);
    } else {
        handler.returnError("Internal Server Error", 500, res);
    }
    return;
}
|
||||
|
||||
/// DELETE /user/:id — delete a user row by numeric id.
///
/// Responds 400 on a missing/invalid id, 500 when the delete itself fails,
/// otherwise returns with no payload.
pub fn deleteUser(req: *httpz.Request, res: *httpz.Response) !void {
    const db = handler.getDb();

    const user_id = req.param("id");
    // NOTE(review): this inspects the *response* body, which nothing has
    // written yet — presumably the intent was to reject DELETE requests that
    // carry a *request* body; confirm against the httpz API before changing.
    if (res.body) |_| {
        handler.returnError("Bad Request", 400, res);
        return;
    }
    if (user_id) |id_str| {
        // Base 0 accepts decimal as well as prefixed (0x…/0o…/0b…) forms.
        const id = std.fmt.parseInt(u32, id_str, 0) catch {
            handler.returnError("Bad Request: Invalid Id", 400, res);
            return;
        };
        // Log the concrete cause, surface only a generic 500 to the client.
        db.deleteById(models.User, id) catch |err| {
            std.debug.print("Error while deleting user: {}\n", .{err});
            handler.returnError("Internal Server Error", 500, res);
            return;
        };
    } else {
        handler.returnError("Bad Request: Missing ID", 400, res);
    }
    return;
}
|
106
src/utils.zig
Normal file
106
src/utils.zig
Normal file
|
@ -0,0 +1,106 @@
|
|||
const std = @import("std");
|
||||
|
||||
/// Builds a struct type containing every field of `Base` plus each field of
/// `Additional` whose name does not already exist on `Base` (on a name
/// collision, `Base`'s field declaration wins). Comptime-only; both
/// arguments must be struct types or `@typeInfo(...).Struct` fails to
/// compile. (A dead, empty `comptime {}` validation block full of
/// commented-out code was removed.)
fn SpreadResult(comptime Base: type, comptime Additional: type) type {
    var fields = @typeInfo(Base).Struct.fields;

    const additional_fields = @typeInfo(Additional).Struct.fields;
    // Each `++` re-walks the accumulated slice, so size the comptime
    // interpreter's quota to the worst-case amount of work.
    @setEvalBranchQuota(additional_fields.len * fields.len * 10);
    for (additional_fields) |field| {
        if (@hasField(Base, field.name)) continue;
        fields = fields ++ &[_]std.builtin.Type.StructField{field};
    }

    return @Type(.{ .Struct = .{
        .is_tuple = false,
        .layout = .Auto,
        .backing_integer = null,
        .decls = &.{},
        .fields = fields,
    } });
}
|
||||
|
||||
/// Merges `base` and `additional` into one value of type
/// `SpreadResult(@TypeOf(base), @TypeOf(additional))`. A field present in
/// both inputs ends up with `additional`'s value, since it is copied second.
pub fn structConcatFields(
    base: anytype,
    additional: anytype,
) SpreadResult(@TypeOf(base), @TypeOf(additional)) {
    var merged: SpreadResult(@TypeOf(base), @TypeOf(additional)) = undefined;
    // Copy every field of the base value first...
    inline for (@typeInfo(@TypeOf(base)).Struct.fields) |f| {
        @field(merged, f.name) = @field(base, f.name);
    }
    // ...then overlay the additional value, overwriting shared names.
    inline for (@typeInfo(@TypeOf(additional)).Struct.fields) |f| {
        @field(merged, f.name) = @field(additional, f.name);
    }
    return merged;
}
|
||||
|
||||
/// Builds a struct type equal to `Base` minus the fields at the positional
/// indices listed in `indicesToRemove`. Indices refer to declaration order
/// in `Base`; out-of-range indices match nothing and are silently ignored.
/// Removing every field yields an empty struct type. Comptime-only.
///
/// Cleanups vs. the original: the unused `remainingFieldsLen` computation is
/// gone, the inner scan now `break`s on a match instead of a no-op
/// `continue`, the `?slice` accumulator starts as an empty slice (removing
/// the duplicated `@Type` construction), and the eval quota scales with
/// `indicesToRemove.len`.
fn RemovedResult(comptime Base: type, comptime indicesToRemove: []const u8) type {
    const fields = @typeInfo(Base).Struct.fields;
    @setEvalBranchQuota(fields.len * (indicesToRemove.len + 1) * 10);

    var remaining: []const std.builtin.Type.StructField = &[_]std.builtin.Type.StructField{};
    for (fields, 0..) |field, i| {
        var keep = true;
        for (indicesToRemove) |r_index| {
            if (r_index == i) {
                keep = false;
                break;
            }
        }
        if (keep) {
            remaining = remaining ++ &[_]std.builtin.Type.StructField{field};
        }
    }

    return @Type(.{ .Struct = .{
        .is_tuple = false,
        .layout = .Auto,
        .backing_integer = null,
        .decls = &.{},
        .fields = remaining,
    } });
}
|
||||
|
||||
/// Returns a copy of `base` with the fields at the given positional indices
/// dropped; every surviving field keeps its value from `base`.
pub fn removeStructFields(
    base: anytype,
    comptime indicesToRemove: []const u8,
) RemovedResult(@TypeOf(base), indicesToRemove) {
    const Trimmed = RemovedResult(@TypeOf(base), indicesToRemove);
    var trimmed: Trimmed = undefined;
    // Walk the *result* type's fields, pulling each value out of `base`;
    // by construction every surviving field name also exists on `base`.
    inline for (@typeInfo(Trimmed).Struct.fields) |f| {
        @field(trimmed, f.name) = @field(base, f.name);
    }
    return trimmed;
}
|
||||
|
||||
// Smoke test: index 4 is out of range for a four-field struct, so
// removeStructFields should leave every field intact; the result is only
// printed, not asserted.
test {
    const sample = .{ .id = 0, .createdAt = 2, .other = 3, .key = 4 };
    const trimmed = removeStructFields(sample, &[_]u8{4});
    std.debug.print("\n{any}\n", .{trimmed});
}
|
Loading…
Reference in New Issue
Block a user