https://github.com/typst-doc-cn/tutorial
https://raw.githubusercontent.com/typst-doc-cn/tutorial/main/README.md
markdown
Apache License 2.0
# Typst Chinese Tutorial [![Download the latest version](https://custom-icon-badges.demolab.com/badge/-Download-blue?style=for-the-badge&logo=download&logoColor=white "Download the latest version")](https://nightly.link/typst-doc-cn/tutorial/workflows/build/main/ebook.zip) **(latest version)** [![Download the latest version](https://custom-icon-badges.demolab.com/badge/-Download-blue?style=for-the-badge&logo=download&logoColor=white "Download the latest version")](https://github.com/typst-doc-cn/tutorial/releases/download/v0.1.0/Typst.Tutorial.CN.v0.1.0.pdf) **(version 0.1.0)**

## Installing the fonts

```bash
git submodule update --init --recursive
```

## Serving as a static website

```bash
shiroa serve --font-path ./assets/typst-fonts/ --font-path ./assets/fonts/ -w . ./src/
```

## Building the static website

```bash
shiroa build --font-path ./assets/typst-fonts/ --font-path ./assets/fonts/ -w . ./src/
```

## Compiling the ebook

```bash
typst compile --root . --font-path ./assets/typst-fonts/ --font-path ./assets/fonts/ ./src/ebook.typ
```

## Compiling a single chapter

Pick a chapter file, e.g. `第一章.typ` ("Chapter 1.typ"), then run:

```bash
typst compile --root . --font-path ./assets/typst-fonts/ --font-path ./assets/fonts/ 章节文件.typ
```

## Reproducing the artifacts

Generating `typst-docs-v0.11.0.json`:

```bash
cargo install --git https://github.com/typst/typst --locked typst-docs --features="cli" --tag v0.11.0
typst-docs --out-file ./assets/artifacts/typst-docs-v0.11.0.json --assets-dir target/typst-docs/assets
```
https://github.com/topdeoo/NENU-Thesis-Typst
https://raw.githubusercontent.com/topdeoo/NENU-Thesis-Typst/master/utils/color.typ
typst
#let colorize(svg, color) = {
  let blk = black.to-hex()
  // You might improve this prototypical detection.
  if svg.contains(blk) {
    // Just replace
    svg.replace(blk, color.to-hex())
  } else {
    // Explicitly state color
    svg.replace("<svg ", "<svg fill=\"" + color.to-hex() + "\" ")
  }
}
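A minimal usage sketch (assumed, not from the repo): read an SVG sitting next to the document, tint it, and render the recolored string. `icon.svg` and the color are hypothetical.

// Sketch only: "icon.svg" is a hypothetical asset path.
#let icon-data = read("icon.svg")
// Recolor the SVG source and render it inline.
#image.decode(colorize(icon-data, rgb("#007fad")), width: 1.2em)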
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/circuiteria/0.1.0/src/circuit.typ
typst
Apache License 2.0
#import "@preview/cetz:0.2.2": canvas #import "@preview/tidy:0.3.0" /// Draws a block circuit diagram /// /// This function is also available at the package root /// /// - body (none, array, element): A code block in which draw functions have been called /// - length (length, ratio): Optional base unit /// -> none #let circuit(body, length: 2em) = { set text(font: "Source Sans 3") canvas(length: length, body) }
https://github.com/swablab/documents
https://raw.githubusercontent.com/swablab/documents/main/README.md
markdown
Creative Commons Zero v1.0 Universal
# Official documents of swablab e.V.

## Installing typst

1. Download the executable from https://github.com/typst/typst/releases

## Installing the required fonts

- [Noto](https://fonts.google.com/specimen/Noto)
- [Ubuntu](https://fonts.google.com/specimen/Ubuntu)

## Generating the PDFs

### Linux

```bash
# recompile automatically on changes
typst watch *.typ
# compile once
typst compile *.typ
```

### Windows

```ps1
.\typst.exe compile .\spendenbescheinigung.typ
```
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/text/font-01.typ
typst
Other
// Test string body.
#text("Text") \
#text(red, "Text") \
#text(font: "Ubuntu", blue, "Text") \
#text([Text], teal, font: "IBM Plex Serif") \
#text(red, font: "New Computer Modern", [Text]) \
https://github.com/lphoogenboom/typstThesisDCSC
https://raw.githubusercontent.com/lphoogenboom/typstThesisDCSC/master/chapters/someBasics.typ
typst
#import "../typFiles/chapter.typ": * #show: chapter.with(chapterTitle: "Some Basics", content: [ dasdasdasfgdfgfg ])
https://github.com/AnsgarLichter/cv-typst
https://raw.githubusercontent.com/AnsgarLichter/cv-typst/main/settings/styles.typ
typst
// General Settings
#let pageStyle = (
  paper: "a4",
  margin: (
    left: 1cm,
    right: 1cm,
    top: 0.8cm,
    bottom: 0.4cm,
  ),
)

#let colors = (
  accent: rgb("#007fad"),
)

#let bodyStyle = (
  fonts: ("Source Sans Pro", "Font Awesome 6 Brands", "Font Awesome 6 Free"),
  size: 10pt,
  weight: "regular",
)

#let listStyle = (
  indent: 1em,
)

// Header
#let headerStyle = (
  fonts: ("New Computer Modern Sans"),
  table: (
    columns: (5fr, 1fr),
    columnGutter: 30pt,
  ),
  fullName: (
    size: 36pt,
    weight: "bold",
  ),
  jobTitle: (
    size: 18pt,
    weight: "bold",
  ),
  profilePhoto: (
    width: 100pt,
    height: 100pt,
    stroke: none,
    radius: 9999pt,
    imageHeight: 10.0cm,
  ),
  margins: (
    BetweenInfoAndSocials: 2.5mm,
    bottom: 3pt,
  ),
  socials: (
    columnGutter: 10pt,
  ),
)

// Section
#let sectionStyle = (
  title: (
    size: 16pt,
    weight: "bold",
    fontColor: black,
  ),
  margins: (
    top: 3pt,
    RightToHLine: 2pt,
  ),
)

// Entry
#let entryStyle = (
  table: (
    columns: (5%, 1fr),
  ),
  title: (
    size: 10pt,
    weight: "bold",
    color: black,
  ),
  companyOrUniversity: (
    size: 10pt,
    weight: "bold",
    color: colors.accent,
  ),
  timeAndLocation: (
    size: 10pt,
    weight: "regular",
    color: black,
  ),
  margins: (
    top: 3pt,
    betweenLogoAndTitle: 8pt,
    betweenTitleAndSubtitle: 3pt,
    betweenTimeAndLocation: 10pt,
    betweenIconAndText: 5pt,
  ),
)

// Skills
#let skillsStyle = (
  columns: (18%, 1fr),
  stroke: 1pt + colors.accent,
  radius: 20%,
  margins: (
    betweenSkillTags: 6pt,
    betweenCategories: -6pt,
  ),
)
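A sketch of how these dictionaries might be consumed (assumed usage; the repo's own consumers are not shown here): spread `pageStyle` into `page` and apply the body defaults.

// Assumed consumer code, not part of this file.
#set page(..pageStyle)
#set text(font: bodyStyle.fonts, size: bodyStyle.size, weight: bodyStyle.weight)
#set list(indent: listStyle.indent)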
https://github.com/RomainPierre7/ENSEIRB-report-template
https://raw.githubusercontent.com/RomainPierre7/ENSEIRB-report-template/main/lib.typ
typst
MIT License
#let report() = {
  align(center, [
    RAPPORT
  ])
}
https://github.com/N3M0-dev/Notes
https://raw.githubusercontent.com/N3M0-dev/Notes/main/CS/CS%3AAPP/note.typ
typst
#import "@local/note_template:0.0.1": * #set heading(numbering: "1.1") #outline(indent: true) = Cache Part == Generit Cache Memory Organization For a computre system where each memeory address has $m$ bits, the cache memory is organized as follows: 1. The cache memory is divided into $S=2^s$ sets, each containing $E$ lines. 2. Each line contains a block of $B=2^b$ bytes, a valid bit, a $t=m-(b+s)$ tag bits. So, in short, $S$ sets, $E$ lines per set, $B$ bytes per block, $t$ tag bits and $m$ bits wide memory address in total. Therefore, a cache memory organization can be caracterized by the tuple $(S, E, B, m)$. - How does all these design work? Why is cache designed like this? When the CPU need access to an address in memory, the memory address is sent to cache. The $t$ most significant bits of the address are used to determine the tag in cacheline, and the following $s$ bits are used to determine the set in cache. The remaining $b$ bits are used to determine the offset in the block. If the valid bit is set, and the tag matches, then the data is in the cache. Otherwise, the data is not in the cache, and the CPU has to access the main memory. Following the procudure above, a memory address can be mapped to a unique cacheline, and the word can be located in the cacheline by the offset. If the word is found, it is a cache hit, otherwise, it is a cache miss. Maybe the map from memory address to cache line will vary due to different CPUs, but the basic idea is the same. - Why index with the middle bits? Think about the situation that we need to traverse through a long array, if we index with the high bits, then we can only cache a block-size trunk of the array in time. In detail, let's say teh parameter $E$ is 4, so as we traverse, we can only cache 4 blocks-size of the array, causing insufficient cache usage.
https://github.com/crd2333/crd2333.github.io
https://raw.githubusercontent.com/crd2333/crd2333.github.io/main/src/docs/其它/index.typ
typst
#import "/src/components/TypstTemplate/lib.typ": * #show: project.with( title: none, lang: "zh", ) - 存储一些杂项笔记,如 + makefile + VSCode + Linux 和 CLI(zsh, powershell, terminal) + git
https://github.com/liuzhuan/reading-list
https://raw.githubusercontent.com/liuzhuan/reading-list/master/books/typescript-handbook/README.md
markdown
# TypeScript Handbook

Table of contents

- [Basic Types](#basic-types)
- [Variable Declarations](#variable-declarations)
- [Interfaces](#interfaces)
- [Classes](#classes)
- [Functions](#functions)
- [Generics](#generics)
- [Type Inference](#type-inference)
- [Symbols](#symbols)
- [Iterators and Generators](#iterators-and-generators)
- [Modules](#modules)
- [Namespaces](./namespaces.md)

## [Basic Types][1]

```ts
// Boolean
let isDone: boolean = true;

// Number
let decimal: number = 6;
let hex: number = 0xf00d;
let binary: number = 0b1010;
let octal: number = 0o744;

// String
let color: string = 'blue';
color = "red";

// Template String
let fullName: string = `<NAME>`;
let age: number = 37;
let sentence: string = `Hello, my name is ${ fullName }. I'll be ${ age + 1 } years old next month.`;

// Array
let list: number[] = [1, 2, 3];
let list: Array<number> = [1, 2, 3];

// Tuple
let x: [string, number];
x = ['hello', 10];
x = [10, 'hello']; // Error!
x[0].substring(1); // OK
x[1].substring(1); // Error, 'number' does not have 'substring'
x[3] = 'world'; // Error
console.log(x[5]); // Error

// Enum
enum Color { Red, Green, Blue };
let c: Color = Color.Green;

enum Color { Red = 1, Green, Blue };
let c: Color = Color.Green;

enum Color { Red = 1, Green, Blue };
let colorName: string = Color[2];
console.log(colorName); // Green

// Any
let notSure: any = 4;
notSure = 'maybe a string instead';
notSure = false;

let notSure: any = 4;
notSure.ifItExists(); // OK
notSure.toFixed(); // OK

let prettySure: Object = 4;
prettySure.toFixed(); // Error: Property 'toFixed' doesn't exist on type 'Object'

let list: any[] = [1, true, 'free'];
list[1] = 100;

// Void
function warnUser(): void {
  console.log('This is my warning message');
}
let unusable: void = undefined;
unusable = null; // OK if `--strictNullChecks` is not given

// Null and Undefined
let u: undefined = undefined;
let n: null = null;
```

By default, `null` and `undefined` are subtypes of all other types, which means you can assign `null` and `undefined` to a `number`.

However, with `--strictNullChecks`, `null` and `undefined` are only assignable to `any` and to their own types (the one exception: `undefined` is also assignable to `void`).

If you want to accept a `string` or `null` or `undefined`, use a union type: `string | null | undefined`.

**Never**

The `never` type represents values that never occur. For example, a function that always throws an exception, or never returns at all, has the return type `never`.

`never` is a subtype of every type, but no other type is assignable to a variable of type `never`.

```typescript
function error(message: string): never {
  throw new Error(message);
}

function fail() {
  return error('Something failed');
}

function infiniteLoop(): never {
  while (true) {
    // do something awesome
  }
}
```

**Object**

`object` represents non-primitive types. `Object.create` can be typed more precisely with `object`:

```typescript
declare function create(o: object | null): void;

create({ prop: 0 }); // OK
create(null); // OK
create(42); // Error
create('string'); // Error
create(false); // Error
create(undefined); // Error
```

**Type assertions**

When you know more about a type than the compiler does, you can use a *type assertion* to guide the compiler to a more precise type.

Type assertions resemble type casts in other languages, but they perform no validation or restructuring of the data and have no runtime effect; they only affect the compiler.

There are two forms. One is the "angle-bracket" syntax:

```typescript
let someValue: any = 'this is a string';
let strLength: number = (<string>someValue).length;
```

The other is the `as` syntax:

```typescript
let someValue: any = 'this is a string';
let strLength: number = (someValue as string).length;
```

The two are equivalent; which one to use is a matter of preference. However, when using TypeScript with JSX, only the `as` syntax is allowed.

## [Variable Declarations][2]

`let` has many advantages over `var`, such as block scoping and protection against accidental re-declaration.

Use `const` whenever possible; otherwise, use `let`. Avoid `var`.

## [Interfaces][3]

A core principle of TypeScript is that type checking focuses on the shape of values. This is sometimes called "duck 🦆 typing" or "structural subtyping".

Interfaces are TypeScript's way of defining contracts between pieces of code.

The simplest example:

```typescript
function printLabel(labelObj: { label: string }) {
  console.log(labelObj.label);
}

let myObj = { size: 10, label: 'Size 10 Object' };
printLabel(myObj);
```

Although the object has more properties than the parameter type declares, the compiler only checks that the required ones are present, so this does not error.

Rewritten with an interface, the code looks like this:

```typescript
interface LabeledValue {
  label: string;
}

function printLabel(labelObj: LabeledValue) {
  console.log(labelObj.label);
}

let myObj = { size: 10, label: 'Size 10 Object' };
printLabel(myObj);
```

### Optional properties

```typescript
interface SquareConfig {
  color?: string;
  width?: number;
}

function createSquare(config: SquareConfig): { color: string, area: number } {
  let newSquare = { color: 'white', area: 100 };
  if (config.color) {
    newSquare.color = config.color;
  }
  if (config.width) {
    newSquare.area = config.width * config.width;
  }
  return newSquare;
}

let mySquare = createSquare({ color: 'black' });
```

The benefit of optional properties is that property names not belonging to the interface are rejected, preventing mistakes.

### Readonly properties

Some properties should only be written at creation time; mark them with the `readonly` modifier.

```typescript
interface Point {
  readonly x: number;
  readonly y: number;
}

let p1: Point = { x: 10, y: 20 };
p1.x = 4; // Error, x is readonly property
```

TypeScript ships with a `ReadonlyArray<T>` type that behaves like `Array<T>` with all mutating methods removed, so you can be sure the array never changes after creation.

```typescript
let a: number[] = [1, 2, 3, 4];
let ro: ReadonlyArray<number> = a;
ro[0] = 12; // Error
ro.push(15); // Error
ro.length = 100; // Error
a = ro; // Error
a = ro as number[]; // OK
```

The difference between `readonly` and `const`: use `const` for variables and `readonly` for properties.

### Excess property checks

```typescript
interface SquareConfig {
  color?: string;
  width?: number;
  [propName: string]: any;
}

function createSquare(config: SquareConfig): { color: string; area: number } {
  // ...
}

let mySquare = createSquare({ coluor: 'red', width: 100 });
```

### Function types

```typescript
interface SearchFunc {
  (source: string, subString: string): boolean;
}

let mySearch: SearchFunc;
mySearch = function(src: string, sub: string): boolean {
  let result = src.search(sub);
  return result > -1;
}
```

### Indexable types

The *index signature* of an *indexable type* describes the type of the index and the corresponding return type.

```typescript
interface StringArray {
  [index: number]: string;
}

let myArray: StringArray;
myArray = ['Bob', 'Fred'];

let myStr: string = myArray[0];
```

Index signatures can be of type `number` or `string`. The return type of the `number` index signature must be a subtype of the return type of the `string` index signature.

Index signatures can be marked `readonly` to make the index read-only:

```typescript
interface ReadonlyStringArray {
  readonly [index: number]: string;
}

let myArray: ReadonlyStringArray = ['Alice', 'Bob'];
myArray[2] = 'Mallory'; // Error
```

### Class types

Use `implements` to enforce that a class satisfies an interface.

```typescript
interface ClockInterface {
  currentTime: Date;
  setTime(d: Date): void;
}

class Clock implements ClockInterface {
  currentTime: Date = new Date();
  setTime(d: Date) {
    this.currentTime = d;
  }
  constructor(h: number, m: number) {}
}
```

An interface describes the public side of a class.

#### Difference between the static and instance sides of a class

A class has two types: the static side and the instance side. When a class implements an interface, only the instance side is checked. The constructor belongs to the static side; to check it, use the following approach:

```typescript
interface ClockConstructor {
  new (hour: number, minute: number): ClockInterface;
}

interface ClockInterface {
  tick(): void;
}

function createClock(ctor: ClockConstructor, hour: number, minute: number): ClockInterface {
  return new ctor(hour, minute);
}

class DigitalClock implements ClockInterface {
  constructor(h: number, m: number) {}
  tick() {
    console.log('beep beep');
  }
}

class AnalogClock implements ClockInterface {
  constructor(h: number, m: number) {}
  tick() {
    console.log('tick tock');
  }
}

let digital = createClock(DigitalClock, 12, 17);
let analog = createClock(AnalogClock, 7, 32);
```

### Extending interfaces

Interfaces can extend each other.

```typescript
interface Shape {
  color: string;
}

interface Square extends Shape {
  sideLength: number;
}

let square = {} as Square;
square.color = 'blue';
square.sideLength = 10;
```

An interface can extend multiple interfaces.

```typescript
interface Shape {
  color: string;
}

interface PenStroke {
  penWidth: number;
}

interface Square extends Shape, PenStroke {
  sideLength: number;
}

let square = {} as Square;
square.color = 'blue';
square.sideLength = 10;
square.penWidth = 5.0;
```

### Hybrid types

```typescript
interface Counter {
  (start: number): string;
  interval: number;
  reset(): void;
}

function getCounter(): Counter {
  let counter = (function (start: number) {}) as Counter;
  counter.interval = 123;
  counter.reset = function() {};
  return counter;
}

let c = getCounter();
c(10);
c.reset();
c.interval = 5.0;
```

### Interfaces extending classes

When an interface extends a class type, it inherits all the members of the class, including private and protected ones. This means that an interface extending a class with private members can only be implemented by that class or its subclasses.

## [Classes][4]

A simple class example:

```typescript
class Greeter {
  greeting: string;
  constructor(message: string) {
    this.greeting = message;
  }
  greet() {
    return `Hello, ${this.greeting}`;
  }
}

let greeter = new Greeter('world');
```

### Inheritance

A simple example of inheritance:

```typescript
class Animal {
  move(distanceInMeters: number = 0) {
    console.log(`Animal moved ${distanceInMeters}m.`);
  }
}

class Dog extends Animal {
  bark() {
    console.log('Woof! Woof!');
  }
}

const dog = new Dog();
dog.bark();
dog.move(10);
dog.bark();
```

A more complex one:

```typescript
class Animal {
  name: string;
  constructor(theName: string) {
    this.name = theName;
  }
  move(distanceInMeters: number = 0) {
    console.log(`${this.name} moved ${distanceInMeters}m.`);
  }
}

class Snake extends Animal {
  constructor(name: string) {
    super(name);
  }
  move(distanceInMeters = 5) {
    console.log('Slithering...');
    super.move(distanceInMeters);
  }
}

class Horse extends Animal {
  constructor(name: string) {
    super(name);
  }
  move(distanceInMeters = 45) {
    console.log('Galloping...');
    super.move(distanceInMeters);
  }
}

let sam = new Snake('<NAME>');
let tom: Animal = new Horse('<NAME>');

sam.move();
tom.move(34);
```

### Public, private, and protected modifiers

In TypeScript, each member is `public` by default.

Use `private` to mark private members.

```typescript
class Animal {
  private name: string;
  constructor(theName: string) {
    this.name = theName;
  }
}

new Animal('Cat').name; // Error: 'name' is private;
```

TypeScript has a structural type system: when comparing two types, if all members are compatible, the types themselves are considered compatible, regardless of where they come from.

However, when the compared types contain `private` or `protected` members, the rule changes: if a type contains a `private` member, another type is only compatible if its private member originates from the same declaration. Otherwise, even with identical shapes, the types are not compatible.

For example:

```typescript
class Animal {
  private name: string;
  constructor(theName: string) {
    this.name = theName;
  }
}

class Rhino extends Animal {
  constructor() {
    super('Rhino');
  }
}

class Employee {
  private name: string;
  constructor(theName: string) {
    this.name = theName;
  }
}

let animal = new Animal('Goat');
let rhino = new Rhino();
let employee = new Employee('Bob');

animal = rhino;
animal = employee; // Error: Type 'Employee' is not assignable to type 'Animal'.
```

### Understanding protected

`protected` is like `private`, with one difference: `protected` members can be accessed from within subclasses.

```typescript
class Person {
  protected name: string;
  constructor(name: string) {
    this.name = name;
  }
}

class Employee extends Person {
  private department: string;
  constructor(name: string, department: string) {
    super(name);
    this.department = department;
  }
  public getElevatorPitch() {
    return `Hello, my name is ${this.name} and I work in ${this.department}`;
  }
}

let howard = new Employee('Howard', 'Sales');
console.log(howard.getElevatorPitch());
console.log(howard.name); // Error
```

A constructor can also be marked `protected`, which means the class cannot be instantiated outside its containing class but can still be extended.

```typescript
class Person {
  protected name: string;
  protected constructor(theName: string) {
    this.name = theName;
  }
}

class Employee extends Person {
  private department: string;
  constructor(name: string, department: string) {
    super(name);
    this.department = department;
  }
  public getElevatorPitch() {
    return `Hello, my name is ${this.name} and I work in ${this.department}`;
  }
}

let howard = new Employee('Howard', 'Sales');
let john = new Person('John'); // Error: The 'Person' constructor is protected
```

### Readonly modifier

Readonly properties can only be initialized at their declaration or in the constructor.

```typescript
class Octopus {
  readonly name: string;
  readonly numberOfLegs: number = 8;
  constructor(theName: string) {
    this.name = theName;
  }
}

let dad = new Octopus('Man with the 8 strong legs');
dad.name = 'Man with the 3-piece suit'; // Error: name is readonly
```

### Parameter properties

*Parameter properties* let you create and initialize a property in one place.

```typescript
class Octopus {
  readonly numberOfLegs: number = 8;
  constructor(readonly name: string) {}
}
```

Besides `readonly`, you can also use `private`, `public`, and `protected`.

### Accessors

TypeScript supports getters/setters.

```typescript
const fullNameMaxLength = 10;

class Employee {
  private _fullName: string;

  get fullName(): string {
    return this._fullName;
  }

  set fullName(newName: string) {
    if (newName && newName.length > fullNameMaxLength) {
      throw new Error(`fullName has a max length of ${fullNameMaxLength}`);
    }
    this._fullName = newName;
  }
}

let employee = new Employee();
employee.fullName = '<NAME>';
if (employee.fullName) {
  console.log(employee.fullName);
}
```

Note: accessors require the TypeScript compiler to target ECMAScript 5 or higher.

### Static properties

```typescript
class Grid {
  static origin = { x: 0, y: 0 };
  calculateDistanceFromOrigin(point: {x: number; y: number;}) {
    let xDist = point.x - Grid.origin.x;
    let yDist = point.y - Grid.origin.y;
    return Math.sqrt(xDist * xDist + yDist * yDist) / this.scale;
  }
  constructor(public scale: number) {}
}

let grid1 = new Grid(1.0);
let grid2 = new Grid(5.0);

console.log(grid1.calculateDistanceFromOrigin({ x: 10, y: 10 }));
console.log(grid2.calculateDistanceFromOrigin({ x: 10, y: 10 }));
```

### Abstract classes

Abstract classes can be inherited from but cannot be instantiated themselves. They are marked with `abstract`.

```typescript
abstract class Animal {
  abstract makeSound(): void;
  move(): void {
    console.log('roaming the earth...');
  }
}

let dog = new Animal(); // Error: Cannot create an instance of an abstract class
```

Methods marked `abstract` must be implemented in subclasses.

```typescript
abstract class Department {
  constructor(public name: string) {}

  printName(): void {
    console.log(`Department name: ${this.name}`);
  }

  abstract printMeeting(): void;
}

class AccountingDepartment extends Department {
  constructor() {
    super('Accounting and Auditing');
  }

  printMeeting(): void {
    console.log('The Accounting Department meets each Monday at 10am.');
  }

  generateReports(): void {
    console.log('Generating accounting reports...');
  }
}

let department: Department;
department = new Department(); // Error: cannot instantiate an abstract class
department = new AccountingDepartment();
department.printName();
department.printMeeting();
department.generateReports(); // Error: the abstract type Department has no generateReports method
```

### Advanced techniques

Constructor functions

```typescript
class Greeter {
  static standardGreeting = 'Hello, there';
  greeting: string;
  greet() {
    if (this.greeting) {
      return `Hello, ${this.greeting}`;
    }
    return Greeter.standardGreeting;
  }
}

let greeter1: Greeter;
greeter1 = new Greeter();
console.log(greeter1.greet());

let greeterMaker: typeof Greeter = Greeter;
greeterMaker.standardGreeting = 'Hey there!';

let greeter2: Greeter = new greeterMaker();
console.log(greeter2.greet());
```

Using a class as an interface

```typescript
class Point {
  x: number;
  y: number;
}

interface Point3d extends Point {
  z: number;
}

let point3d: Point3d = { x: 1, y: 2, z: 3 };
```

## [Functions][5]

Optional parameters

```typescript
function buildName(firstName: string, lastName?: string) {
  if (lastName) return `${firstName} ${lastName}`;
  return firstName;
}

let result1 = buildName('Bob');
```

Default values

```ts
function buildName(firstName: string, lastName = 'Smith') {
  return `${firstName} ${lastName}`;
}

console.log(buildName("Tony")); // => <NAME>
console.log(buildName("Tony", "Stark")); // => <NAME>
```

### Rest parameters

```ts
function buildName(firstName: string, ...restOfName: string[]) {
  return `${firstName} ${restOfName.join(' ')}`;
}

let employeeName = buildName('Joseph', 'Samuel', 'Lucas', 'MacKinzie');
console.log(employeeName);
```

The ellipsis can also be used in function types:

```ts
function buildName(firstName: string, ...restOfName: string[]) {
  return `${firstName} ${restOfName.join(' ')}`;
}

let buildNameFun: (fname: string, ...rest: string[]) => string = buildName;
```

### this

Mastering `this` is something of a rite of passage on the road to learning JavaScript.

this and arrow functions

```typescript
let deck = {
  suits: ['hearts', 'spades', 'clubs', 'diamonds'],
  cards: Array(52),
  createCardPicker: function() {
    return function() {
      let pickedCard = Math.floor(Math.random() * 52);
      let pickedSuit = Math.floor(pickedCard / 13);
      return {
        suit: this.suits[pickedSuit],
        card: pickedCard % 13,
      };
    }
  }
}

let cardPicker = deck.createCardPicker();
let pickedCard = cardPicker();

console.log(`card: ${pickedCard.card} of ${pickedCard.suit}`);
// => Uncaught TypeError: Cannot read property 'suits' of undefined
```

> Arrow functions capture the `this` where the function is created rather than where it is invoked.

For arrow functions, `this` is bound at definition time, not at call time.

```typescript
let deck = {
  suits: ['hearts', 'spades', 'clubs', 'diamonds'],
  cards: Array(52),
  createCardPicker: function() {
    return () => {
      let pickedCard = Math.floor(Math.random() * 52);
      let pickedSuit = Math.floor(pickedCard / 13);
      return {
        suit: this.suits[pickedSuit],
        card: pickedCard % 13,
      };
    }
  }
}

let cardPicker = deck.createCardPicker();
let pickedCard = cardPicker();

console.log(`card: ${pickedCard.card} of ${pickedCard.suit}`);
// => card: 10 of hearts
```

`this` parameters

```ts
interface Card {
  suit: string;
  card: number;
}

interface Deck {
  suits: string[];
  cards: number[];
  createCardPicker(this: Deck): () => Card;
}

let deck: Deck = {
  suits: ['hearts', 'spades', 'clubs', 'diamonds'],
  cards: Array(52),
  createCardPicker: function(this: Deck) {
    return () => {
      let pickedCard = Math.floor(Math.random() * 52);
      let pickedSuit = Math.floor(pickedCard / 13);
      return {
        suit: this.suits[pickedSuit],
        card: pickedCard % 13
      }
    }
  }
}

let cardPicker = deck.createCardPicker();
let pickedCard = cardPicker();

console.log(`card: ${pickedCard.card} of ${pickedCard.suit}`);
```

## [Generics][6]

Reusability matters in software engineering, and generics are one of the key tools for code reuse.

The most basic use of generics is the identity function:

```ts
function identity<T>(arg: T): T {
  return arg;
}
```

Here, `T` is a *type variable*.

Generics can be used in two ways. One is to supply the type argument explicitly:

```ts
let output = identity<string>('myString');
```

The second, more common way is *type argument inference*: the compiler sets `T` automatically from the type of the argument:

```ts
let output = identity('myString');
```

Inside the function, the compiler treats the type parameter as standing for any possible type; generics combine well with arrays.

```ts
function loggingIdentity<T>(arg: T[]): T[] {
  console.log(arg.length);
  return arg;
}
```

Generic functions and interfaces are not fundamentally different from ordinary ones:

```ts
interface GenericIdentityFn {
  <T>(arg: T): T;
}

function identity<T>(arg: T): T {
  return arg;
}

let myIdentity: GenericIdentityFn = identity;
```

You can also make the type parameter a parameter of the whole interface, which is more explicit for callers:

```ts
interface GenericIdentityFn<T> {
  (arg: T): T;
}

function identity<T>(arg: T): T {
  return arg;
}

let myIdentity: GenericIdentityFn<number> = identity;
```

Besides generic interfaces, you can also define generic classes.

```ts
class GenericNumber<T> {
  zeroValue: T;
  add: (x: T, y: T) => T;
}

let myGenericNumber = new GenericNumber<number>();
myGenericNumber.zeroValue = 0;
myGenericNumber.add = function(x, y) {
  return x + y;
};
```

Note that the static members of a generic class are not covered by the class's type parameter.

To constrain a generic type, describe the constraint with an interface and use `extends`:

```ts
interface Lengthwise {
  length: number;
}

function loggingIdentity<T extends Lengthwise>(arg: T): T {
  console.log(arg.length);
  return arg;
}

loggingIdentity('abc');
```

You can declare a type parameter that is constrained by another type parameter:

```ts
function getProperty<T, K extends keyof T>(obj: T, key: K) {
  return obj[key];
}

let x = {
  a: 1,
  b: 2,
  c: 3,
  d: 4,
}

getProperty(x, 'a');
```

This uses the [`keyof`][7] operator, introduced in TypeScript 2.1, which yields the union of the keys of a type.

Using generics with factory functions looks like this:

```ts
class BeeKeeper {
  hasMask: boolean;
}

class ZooKeeper {
  nametag: string;
}

class Animal {
  numLegs: number;
}

class Bee extends Animal {
  keeper: BeeKeeper;
}

class Lion extends Animal {
  keeper: ZooKeeper;
}

function createInstance<A extends Animal>(c: new () => A): A {
  return new c();
}

createInstance(Lion).keeper.nametag;
createInstance(Bee).keeper.hasMask;
```

## [Enums][8]

Enums are used to define named constants.

Too much detail; read it later.

## [Type Inference][9]

Type inference comes in several forms:

1. Ordinary inference, performed for variable declarations, function defaults, and so on
2. Contextual typing, which infers a variable's type from its position

## [Symbols][10]

`symbol` is a primitive type added in ES2015. Symbols are created by calling the `Symbol` constructor, and each one is unique.

```js
let sym1 = Symbol();
let sym2 = Symbol('hello'); // optional string key
let sym3 = Symbol('hello');

console.log(sym2 == sym3); // => false
console.log(sym2 === sym3); // => false
```

Like strings, symbols can be used as object keys.

```js
const sym = Symbol();

let obj = {
  [sym]: 'value'
}

console.log(obj[sym]); // => value
```

They can also serve as member names in classes:

```js
const getClassNameSymbol = Symbol();

class C {
  [getClassNameSymbol]() {
    return 'C';
  }
}

let c = new C();
let className = c[getClassNameSymbol]();
console.log(className); // => C
```

Besides user-defined symbols, ES ships with a number of well-known symbols, such as `Symbol.hasInstance` and `Symbol.isConcatSpreadable`.

## [Iterators and Generators][11]

An object is considered iterable if it implements the `Symbol.iterator` property. Built-ins such as `Array`, `Map`, `Set`, `String`, `Int32Array`, and `Uint32Array` already come with it. The `Symbol.iterator` function returns the sequence of values to iterate over.

The `for...of` statement:

```js
let someArray = [1, "string", false];

for (let entry of someArray) {
  console.log(entry); // => 1, "string", false
}
```

`for...of` only works on iterables, whereas `for...in` works on any object.

```js
let pets = new Set(['Cat', 'Dog', 'Hamster']);
pets['species'] = 'mammals';

for (let pet in pets) {
  console.log(pet); // => species
}

for (let pet of pets) {
  console.log(pet); // => Cat Dog Hamster
}
```

When targeting ES5 or ES3, `for...of` is only allowed on arrays; anything else is an error.

## [Modules][12]

### Exports

Any declaration (including but not limited to variables, functions, classes, type aliases, and interfaces) can be exported with `export`.

```ts
// StringValidator.ts
export interface StringValidator {
  isAcceptable(s: string): boolean;
}

// ZipCodeValidator.ts
import { StringValidator } from './StringValidator';

export const numberRegexp = /^[0-9]+$/;

export class ZipCodeValidator implements StringValidator {
  isAcceptable(s: string) {
    return s.length === 5 && numberRegexp.test(s);
  }
}
```

TODO

[1]: http://www.typescriptlang.org/docs/handbook/basic-types.html "Basic Types"
[2]: http://www.typescriptlang.org/docs/handbook/variable-declarations.html "Variable Declarations"
[3]: http://www.typescriptlang.org/docs/handbook/interfaces.html "Interfaces"
[4]: http://www.typescriptlang.org/docs/handbook/classes.html "Classes"
[5]: http://www.typescriptlang.org/docs/handbook/functions.html "Functions"
[6]: http://www.typescriptlang.org/docs/handbook/generics.html "Generics"
[7]: http://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-1.html#keyof-and-lookup-types "keyof and Lookup Types"
[8]: http://www.typescriptlang.org/docs/handbook/enums.html "Enums"
[9]: http://www.typescriptlang.org/docs/handbook/type-inference.html "Type Inference"
[10]: https://www.typescriptlang.org/docs/handbook/symbols.html "Symbols"
[11]: https://www.typescriptlang.org/docs/handbook/iterators-and-generators.html "Iterators and Generators"
[12]: http://www.typescriptlang.org/docs/handbook/modules.html "Modules"
https://github.com/ayoubelmhamdi/typst-phd-AI-Medical
https://raw.githubusercontent.com/ayoubelmhamdi/typst-phd-AI-Medical/master/template.typ
typst
MIT License
#let execption_chapter = (
  "RÉSUMÉ.",
  "INTRODUCTION GÉNÉRALE.",
  "RÉFÉRENCES BIBLIOGRAPHIQUES.",
  // "CONCLUSION.",
  "CONCLUSION GÉNÉRALE.",
  "ANNEXE.",
  "ANNEXE 1.",
  "ANNEXE 2.",
)

#let execption_outline = (
  "REMERCIEMENTS.",
  "TABLE DES MATIÈRES.",
)

// #let intorduction_outline = (
//   [Définitions.],
//   [Contexte et importance de la détection du cancer du poumon à l'aide de l'apprentissage en profondeur.],
//   [Aperçu de la structure de votre thèse.],
//   [Fournir un contexte et un cadre pour votre sujet de recherche.],
//   [Expliquer l'importance et la motivation de votre recherche.],
//   [Objectifs et objectifs de votre recherche.],
// )

#let book(dict, body) = {
  let title = dict.title
  let authors = dict.authors
  let encaders = dict.encaders

  set page(
    paper: "a4",
    // height: 8cm,
    // width: 18cm,
    // fill: rgb("71757a"),
    numbering: "1",
    number-align: center,
  )
  // set text(lang: "fr", size: 11pt) // landscape
  set text(lang: "fr", size: 12pt)
  set document(author: authors, title: title)

  show heading: set text(fill: rgb("#1e045b"))
  set heading(
    numbering: "I.1.1.",
  )
  show heading: it => pad(bottom: 0.5em, it)
  show heading.where(level: 1): set text(size: 22pt)
  show heading.where(level: 2): set text(size: 18pt)
  show heading.where(level: 3): set text(size: 14pt)

  /* ===========================================
   * HEADING
   * ===========================================*/
  show heading.where(level: 1): it => {
    set par(justify: true)
    pagebreak()
    if it.body.text in execption_outline {
      counter(heading).update(())
      align(
        center,
        text(
          fill: rgb("#1e045b"),
          weight: "bold",
          size: 22pt,
          it.body,
        ),
      )
    } // heading == REMERCIEMENTS
    else if it.body.text in execption_chapter {
      if it.numbering != none {
        counter(heading).update(n => n - 1)
      }
      align(
        center,
        text(
          weight: "bold",
          size: 22pt,
          fill: rgb("#1e045b"),
          it.body,
        ) + linebreak(),
      )
    } // heading == CONCLUSION.
    else {
      align(
        center,
        block(
          align(
            center,
            text(
              weight: "bold",
              size: 18pt,
              fill: rgb("#FF0000"),
              [Chapitre ] + counter(heading).display(),
            ) + linebreak() + v(1em) + text(
              weight: "bold",
              size: 20pt,
              fill: rgb("#1e045b"),
              it.body,
            ),
          ),
        ),
      )
    } // heading == chapter [0-9]
    v(3em)
  } // end of level-1 heading rule

  show heading.where(level: 2): it => locate(loc => {
    let levels = counter(heading).at(loc)
    let deepest = if levels != () { levels.last() } else { 1 }
    if levels.first() == 0 {
      text(
        weight: "bold",
        fill: rgb("#1e045b"),
        str(deepest) + ". " + it.body + linebreak(),
      )
    } else {
      it
    }
  })

  /* ===========================================
   * OUTLINE
   * ===========================================*/
  show outline: it => locate(loc => {
    set text(weight: "bold", fill: rgb("#1e045b"))
    let depth = 3
    let indent = false
    // let elements = query(heading, after: loc) // worked in typst-v0.2.0
    let elements = query(selector(heading).after(loc), loc)

    for el in elements {
      if el.outlined == false { continue }
      if depth != none and el.level > depth { continue }

      let maybe_number = if el.numbering != none {
        numbering(el.numbering, ..counter(heading).at(el.location()))
        " "
      }

      let line = {
        // ----------------------------------
        if el.level == 1 {
          if el.body.text in execption_chapter {
            // like RÉFÉRENCES BIBLIOGRAPHIQUES.
            text(
              weight: "bold",
              size: 14pt,
              fill: rgb("#1e045b"),
              el.body,
            )
          } else {
            // non-exception chapters
            block(
              text(
                weight: "bold",
                size: 16pt,
                fill: rgb("#FF0000"),
                [Chapitre ] + maybe_number,
              ) + text(
                weight: "bold",
                size: 16pt,
                fill: rgb("#1e045b"),
                el.body,
              ) + v(-1.5em),
            )
          }
          // ++++++++++++++++++++++++++++++++++
          // end heading L1
        } else if el.level == 2 {
          if maybe_number.first() == "N" {
            h(22pt)
            maybe_number.trim("N.")
            el.body
          } else {
            maybe_number
            el.body
          }
          // ++++++++++++++++++++++++++++++++++
          // end heading L2
        } else if el.level == 3 {
          text(
            h(22pt) +
            str(counter(heading).at(el.location()).at(-1)) +
            // [--] +
            maybe_number + ". "
          )
          el.body
          // ++++++++++++++++++++++++++++++++++
          // end heading L3
        } else {
          maybe_number
          el.body
        }
        // ++++++++++++++++++++++++++++++++++
        // end of other heading levels

        box(width: 1fr)
        text(fill: black, str(counter(page).at(el.location()).first()))
        linebreak()
      }
      // ----------------------------------
      // end of line
      link(el.location(), line)
    }
  })
  // ===========================================
  // ===========================================

  show raw: it => {
    // set text(font: songti, 12pt)
    set align(center)
    set block(inset: 5pt, fill: luma(240), width: 100%)
    // pad(0em, it)
    it
  }

  // -----------------------------------------------------------
  // set figure(
  //   numbering: "I.1.",
  // )
  set figure(
    numbering: (..nums) => locate(loc => {
      numbering("I.1", counter(heading).at(loc).first(), ..nums)
    }),
  )

  show figure: it => {
    set align(center)
    if it.kind == image {
      if it.caption != none {
        it.body
        text(
          weight: "bold",
          it.supplement + " " + it.counter.display(it.numbering) + ": ",
        )
        text(style: "italic", it.caption)
      } else {
        it.body
      }
    } // image
    else if it.kind == "table" {
      set text(font: songti, size: 12pt)
      it.body
      set text(font: heiti, size: 12pt)
      it.supplement
      " " + it.counter.display(it.numbering)
      " " + it.caption
    } // table
    else if it.kind == "equation" {
      grid(
        columns: (20fr, 1fr),
        it.body,
        align(center + horizon, it.counter.display(it.numbering)),
      )
    } // equation
    else {
      it
    } // other figure kinds
  }
  // -----------------------------------------------------------

  show outline: set block(spacing: 1.25em)
  set par(justify: true)
  set math.equation(
    numbering: "(1)",
  )
  show math.equation: set text(weight: "semibold")

  set page(
    footer: locate(loc => {
      if counter(page).at(loc).first() > 0 {
        align(center)[#counter(page).display()]
      }
      // else {
      //   [HIDE: ] + counter(page).display()
      // }
    }),
  )

  body
}
https://github.com/darioglasl/Arbeiten-Vorlage-Typst
https://raw.githubusercontent.com/darioglasl/Arbeiten-Vorlage-Typst/main/06_Ergebnisse/00_index.typ
typst
This chapter covers the results of this work and discusses whether the goals have been met. It also describes the still-open bugs and recommendations for extensions of the ... implementation. Finally, general opportunities for improving the web application are described.

#include "01_ergebnisse.typ"
#pagebreak()
#include "02_known_bugs.typ"
#pagebreak()
#include "03_empfehlung.typ"
#pagebreak()
#include "04_empfehlung_other.typ"
#pagebreak()
#include "05_danksagung.typ"
https://github.com/yasemitee/Teoria-Informazione-Trasmissione
https://raw.githubusercontent.com/yasemitee/Teoria-Informazione-Trasmissione/main/2023-10-13.typ
typst
#import emoji: square

= Shannon code

== Definition

We have found a way to check whether a code is instantaneous (look at the prefixes) and a way to check whether a series of $n$ positive lengths can be used as the lengths of an instantaneous code (Kraft's inequality), but which of these instantaneous codes is the best possible?

Let us look at a way to *construct* an instantaneous code starting from the source symbols and their probabilities of being emitted by the source.

Given the model $angle.l Chi, p angle.r$ and $D > 1$, we want to find $n$ positive lengths $l_1, dots, l_n$ to build an instantaneous code with those lengths while minimizing $EE[l_c]$, that is:

$ cases(limits("minimize")_(l_1, dots, l_n) limits(sum)_(i=1)^n l_i p_i, "such that " limits(sum)_(i=1)^n D^(-l_i) lt.eq 1) $

What we want is to find $n$ positive lengths $l_1, dots, l_n$ that minimize the expected codeword length and satisfy Kraft's inequality. We have at our disposal:
- $Chi = {x_1, dots, x_n}$, the set of source symbols, with $abs(Chi) = n$;
- $P = {p_1, dots, p_n}$, the set of probabilities, with $p_i = p(x_i)$ and $limits(sum)_(i=1)^n p_i = 1$.

We want to find $L = {l_1, dots, l_n}$, the set of codeword lengths.

Let us combine Kraft's inequality with the definition of probability: since $limits(sum)_(i=1)^n p_i = 1$, we substitute this sum for the value $1$ in Kraft's inequality, obtaining

$ sum_(i=1)^n D^(-l_i) lt.eq sum_(i=1)^n p_i = 1. $

This inequality certainly holds if we impose $D^(-l_i) lt.eq p_i space forall i = 1, dots, n$, but then

$ D^(l_i) dot D^(-l_i) lt.eq p_i dot D^(l_i) arrow.double.long D^(l_i) gt.eq 1 / p_i arrow.double.long l_i gt.eq log_D 1 / p_i. $

The logarithm does not always give integer quantities, so we round this computation up:

$ l_i = ceil(log_D 1 / p_i). $

We have thus found the lengths of our instantaneous code, tightly tied to the probability of each symbol being emitted. The code obtained this way is called the *Shannon code*, or _Shannon-Fano code_, and we can be sure it is a code "that does well": it uses long codewords for symbols that are emitted rarely and short codewords for symbols that are emitted frequently. This property comes from the fact that the logarithm, being a monotonically increasing function, yields:
- "large" values on "large" inputs, i.e. when $1/p_i$ is "large" and hence $p_i$ is "small";
- "small" values on "small" inputs, i.e. when $1/p_i$ is "small" and hence $p_i$ is "large".

== Examples

=== Base / best case

Suppose the source $S$ emits $n = 4$ symbols with probabilities $P = {1/2, 1/4, 1/8, 1/8}$, and we want to build a binary instantaneous code. We compute the lengths with Shannon's recipe:
#v(6pt)
- $l_1 = ceil(log_2 1 / (1/2)) = ceil(log_2 2) = 1$;
#v(6pt)
- $l_2 = ceil(log_2 1 / (1/4)) = ceil(log_2 4) = 2$;
#v(6pt)
- $l_(3,4) = ceil(log_2 1 / (1/8)) = ceil(log_2 8) = 3$.

We compute the expected length as $EE[l_c] = limits(sum)_(i = 1)^4 l_i p_i = 1 dot 1/2 + 2 dot 1/4 + 3 dot 1/8 + 3 dot 1/8 = 7/4$.

Is the value we found the best possible? The answer is yes, and this holds because all the probabilities are _negative powers_ of the chosen base $D$, in our case $D = 2$. We can claim this because the lengths $l_i$ are exactly equal to $log_D 1 / p_i$, with no rounding involved.

=== Degenerate case

Suppose the source $S$ emits $n = 4$ symbols with probabilities $P = {1, 0, 0, 0}$, and we want to build a binary instantaneous code. We compute the lengths with Shannon's recipe:
#v(6pt)
- $l_1 = ceil(log_2 1 / 1) = ceil(log_2 1) = 0$;
#v(6pt)
- $l_(2,3,4) = ceil(limits(lim)_(t arrow 0^+) log_2 1 / t) = +infinity$.

We compute the expected length as $EE[l_c] = limits(sum)_(i = 1)^4 l_i p_i = 0 dot 1 + underbracket(3 dot 0 dot +infinity, 0 "for " t arrow 0^+) = 0$.

=== Equiprobable case

Suppose the source $S$ emits $n = 4$ symbols with probabilities $P = {1/n, 1/n, 1/n, 1/n}$, and we want to build a binary instantaneous code. We compute the lengths with Shannon's recipe:
#v(6pt)
- $l_(1,2,3,4) = ceil(log_2 1 / (1 / 4)) = ceil(log_2 4) = 2$.

We compute the expected length as $EE[l_c] = limits(sum)_(i = 1)^4 l_i p_i = 2 dot 1 / 4 + 2 dot 1 / 4 + 2 dot 1 / 4 + 2 dot 1 / 4 = 2$.

== Entropy

In the Shannon code we set $l_i = ceil(log_D 1 / p_i)$; substituting $l_i$ into the expected length we are trying to minimize gives $EE[l_c] = limits(sum)_(i=1)^n p_i log_D 1 / p_i$. The quantity we have just written is called *entropy*, and we use it because it is closely related to the best encoding we can achieve. In particular, the entropy is the _lower bound_ on the compactness of the code.

All the expected lengths computed in the previous examples are also the entropies of the respective sources, but what values can this entropy take? It is certainly _non-negative_, being a sum of products of _non-negative_ factors. Moreover, it reaches its maximum when the probability distribution is *uniform*, where it equals

$ sum_(i=1)^n p_i log_D 1/p_i = sum_(i=1)^n 1/n log_D n = cancel(n) dot 1 / cancel(n) dot log_D n = log_D n. $

In this case we obtain a _perfectly balanced tree_ with the codewords at the level of the deepest leaves. This tells us the code is _compact_ and balanced, wasting no bits, whereas in other situations we get an _unbalanced_ tree and may waste bits.

#pagebreak()

= Huffman code

Suppose the source $S$ emits $n = 7$ symbols with probabilities $P = {0.3, 0.2, 0.2, 0.1, 0.1, 0.06, 0.04}$, and we want to build a ternary instantaneous code. We have two different approaches:
- Shannon code: we obtain the lengths $2, 2, 2, 3, 3, 3, 3$;
- _graphical_: given a ternary tree, assign the nodes closest to the root to the codewords of the symbols with the highest probabilities.
#v(12pt)
#figure(
  image("assets/2023-10-13_approccio-grafico.svg", width: 100%)
)
#v(12pt)
In the picture, we first place the symbols with probabilities $0.3$ and $0.2$, then as the third node we put a "checkpoint" and restart the procedure from that node.

== Definition

There is in fact a third approach to the previous problem, devised by *<NAME>* in 1952, which works as follows:
+ sort the probabilities in decreasing order;
+ replace the last $D$ probabilities with their sum, and the corresponding symbols with a "dummy" symbol, creating a new "dummy" source;
+ repeat from step $1$ until $t$ probabilities remain, with $t lt.eq D$;
+ write out the Huffman code by "rolling back" to the initial source.

The code created this way is called the *Huffman code*, and it is the _optimal instantaneous code_ we can construct.

Suppose the source $S$ emits $n = 7$ symbols $s_1, dots, s_7$ with probabilities $P = {0.3, 0.2, 0.2, 0.1, 0.1, 0.06, 0.04}$, and we want to build a ternary Huffman code.
#v(12pt)
#figure(
  image("assets/2023-10-13_huffman-compressione.svg", width: 65%)
)
#v(12pt)
In this first phase we "compact" the smallest probabilities until we reach a situation with exactly $D = 3$ symbols.
#v(12pt)
#figure(
  image("assets/2023-10-13_huffman-generazione.svg", width: 65%)
)
#v(12pt)
In the second phase we "roll back" the compressions, substituting the old probabilities at each "compressed" node, thereby building the coding _tree_.
#v(12pt)
#figure(
  image("assets/2023-10-13_albero-huffman.svg", width: 75%)
)
#v(12pt)

Now suppose the source $S$ emits $n = 8$ symbols $s_1, dots, s_7, s_8$ with probabilities $P = {0.3, 0.2, 0.2, 0.1, 0.1, 0.06, 0.02, 0.02}$, and we want to build a ternary Huffman code. Since at each iteration we remove $D = 3$ symbols and add one back, overall we lose $D - 1$ symbols per iteration. Since the algorithm generates the optimal instantaneous code when it terminates with exactly $D$ probabilities, in this example we would end the iterations with only two source symbols rather than three, so the encoding is not optimal.
#v(12pt)
#figure(
  image("assets/2023-10-13_albero-huffman-errato.svg", width: 75%)
)
#v(12pt)
Indeed, note how the root loses a branch, which in turn increases the height of the tree. The solution proposed by Huffman is to insert an appropriate number of "dummy" symbols with zero probability, so that the "compression" then works out to exactly $D$ symbols at the end. But how many new symbols do we need to insert? Suppose we start from $n$ symbols and remove $D - 1$ symbols each time:

$ n arrow.long n - (D - 1) arrow.long n - 2 dot (D - 1) arrow.long dots arrow.long n - t dot (D - 1). $

Call $square.black = n - t dot (D - 1)$. We have reached $square.black$ elements, with $square.black lt.eq D$, but we want exactly $D$ elements to get a well-balanced tree with no lost branches, so we add to $square.black$ a number $k$ of elements such that $square.black + k = D$. Suppose we run one more step of the algorithm, removing $D - 1$ elements from $square.black + k$ and leaving the source with a single element. What have we obtained? Recalling that $square.black = n - t dot (D - 1)$, we have shown that:

$ square.black + k - (D - 1) &= 1 \ n - t dot (D - 1) + k - (D - 1) &= 1 \ n + k - (t + 1) dot (D - 1) &= 1. $

In short, the number $n$ of source symbols plus the number $k$ of "dummy" symbols is congruent to $1$ modulo $D - 1$, i.e.

$ n + k equiv 1 mod D - 1. $

Let us repeat the previous example, adding the symbol $s_9$ to $S$ with probability $0$.
#v(12pt)
#figure(
  image("assets/2023-10-13_albero-huffman-esteso.svg", width: 75%)
)
#v(12pt)
In what order do we insert the "compressed" symbols back into the probability list? In a _random_ order, so the generated code is not guaranteed to be unique.
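A small computational sketch (not from the notes): Shannon's lengths $l_i = ceil(log_D 1/p_i)$ and the expected length $EE[l_c]$, checked against the first example above.

#let shannon-lengths(P, D: 2) = P.map(p =>
  // the tiny epsilon guards against floating-point overshoot on exact powers
  calc.ceil(calc.log(1 / p, base: D) - 1e-9)
)
#let expected-length(P, L) = P.zip(L).map(pair => pair.at(0) * pair.at(1)).sum()

#let P = (0.5, 0.25, 0.125, 0.125)
#shannon-lengths(P) // (1, 2, 3, 3)
#expected-length(P, shannon-lengths(P)) // 1.75 = 7/4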
https://github.com/piepert/philodidaktik-hro-phf-ifp
https://raw.githubusercontent.com/piepert/philodidaktik-hro-phf-ifp/main/src/parts/ephid/ziele_und_aufgaben/anforderungsbereiche.typ
typst
Other
#import "/src/template.typ": * == #ix("Anforderungsbereiche", "Anforderungsbereich") Die Anforderungsbereiche sind durch die EPA festgelegt und erklären, in welchen Dimensionen die Aufgaben des Philosophieunterrichts stattfinden sollen:#en[@KMK2006_EPAPhil] #orange-list-with-body[*Anforderungsbereich I -- Problemerfassung*][ Das philosophische Problem wird erkannt und die Schwerpunkte für die folgende Bearbeitung charakterisiert. Das Problem wird begrifflich bestimmt und systematisch eingeordnet und abgegrenzt. Liegt ein präsentatives Material vor, ist eine Interpretation des Materials vorzunehmen und das philosophische Problem diskursiv zu formulieren. Die Form der Präsentation ist -- sofern die SuS freie Entscheidung hatten -- zu begründen. ][*Anforderungsbereich II -- Problembearbeitung*][ Die philosophischen Argumente und der Argumentationsgang werden untersucht und reflektiert. Die Form der Argumentation oder Präsentation sind mit einzubeziehen. Wird die Aufgabe präsentativ bearbeitet, muss sich das Produkt plausibel und transparent auf die philosophische Idee beziehen. Das präsentative Produkt kann um Kommentare erweitert werden. ][*Anforderungsbereich III -- Problemverortung*][ Es erfolgt eine Positionierung der SuS, in der der Problemkontext auf den eigenen Standpunkt bezogen wird. Dazu kommen additiv#en[additiv: Wenn A, B, und C additiv auftreten, treten sie alle zusammen auf.] oder alternativ#en[alternativ: Wenn A, B, und C alternativ auftreten, tritt mindestens eins von ihnen auf.]: - eine Beurteilung des Problems - eine resümierende Stellungnahme - eine Neubestimmung des Problems - Perspektiven zur weiteren Bearbeitung - eine Modifikation erörterter Positionen - die Reflexion des präsentativen Bearbeitungsprozesses - ... ] Der dritte Anforderungsbereich kann die anderen beiden beinhalten.
https://github.com/DashieTM/ost-5semester
https://raw.githubusercontent.com/DashieTM/ost-5semester/main/blockchain/weeks/week3.typ
typst
#import "../../utils.typ": * #section("JWT Token") #columns( 2, [ #align( center, [#image("../../Screenshots/2023_10_02_10_55_11.png", width: 100%)], ) #colbreak() A jwt token is created with a header, the payload and a secret key, which together combines into a hash that will be stored. ], ) #align( center, [#image("../../Screenshots/2023_10_02_10_56_43.png", width: 80%)], ) #subsection("Access token and refresh token") #columns(2, [ - access has short lifetime -> 20 min etc. - access token is used to access content on website -> authentication #colbreak() - refresh token has long lifetime -> 6 months etc. - refresh token is used to generate a new access token easier ]) #section("Load Balancing") - horizontal scaling -> distribution of workloads - ensures high availability -> hardware failure might not impact website as load balancer will redistribute requests to other servers - example FOSS load balancer: #link("https://github.com/caddyserver/caddy")[Caddy] #align( center, [#image("../../Screenshots/2023_10_02_11_01_30.png", width: 80%)], ) Example Caddy config//typstfmt::off ```rs // #Caddyfile // :7070 // reverse_proxy * { // to http://dsy-services-1:8080 // to http://dsy-services-2:8080 // to http://dsy-services-3:8080 // to http://dsy-services-4:8080 // to http://dsy-services-5:8080 // } // lb_policy round_robin // lb_try_duration 1s // lb_try_interval 100ms // fail_duration 10s // unhealthy_latency 1s ``` //typstfmt::on #section("CORS Cross Origin Resource Sharing") - dev solution: //typstfmt::off ```js w.Header().Set("Access-Control-Allow-Origin","*") ``` //typstfmt::on - proper solutions: - use reverse proxy - allow specific CORS: Access-Control-Allow-Origin: https://foo.example #section("Containers") #subsection("OverlayFS") special filesystem that "merges" two filesystems with one being writeable and one being read-only -> this is useful for ISO bootable sticks with persistent storage or docker containers. #subsection("Cgroups") Allows you to split your cpu to different tasks while defining how much of the cpu each task will be allowed to use. //typstfmt::off ```bash ls /sys/fs/cgroup sudo apt install cgroup-tools / yay -S libcgroup cgcreate -g cpu:red cgcreate -g cpu:blue echo -n "20" > /sys/fs/cgroup/blue/cpu.weight echo -n "80" > /sys/fs/cgroup/red/cpu.weight cgexec -g cpu:blue bash cgexec -g cpu:red bash ``` //typstfmt::on #subsection("Seperate Networks") Linux network spaces provides isolation for each task: //typstfmt::off ```bash ip netns add testnet ip netns list #Create virtual ethernet connection ip link add veth0 type veth peer name veth1 netns testnet ip link list #? ip netns exec testnet <cmd> #Configure network ip addr add 10.1.1.1/24 dev veth0 ip netns exec testnet ip addr add 10.1.1.2/24 dev veth1 ip netns exec testnet ip link set dev veth1 up ``` //typstfmt::on
https://github.com/swablab/documents
https://raw.githubusercontent.com/swablab/documents/main/satzung.typ
typst
Creative Commons Zero v1.0 Universal
#import "templates/tmpl_doc.typ": tmpl_doc #show: doc => tmpl_doc( title: "Satzung", changes: ( [v1.0], [19.10.2020], [erste Fassung], ), doc, ) #let title_group(title) = { show heading: it => [ #set text(font: "Ubuntu", 24pt) #strong[#it.body] #v(1.25em, weak: true) ] heading(title, numbering: none) } #title_group("Allgemeines") = Name, Sitz, Eintragung, Geschäftsjahr + Der Verein führt den Namen swablab und soll in das Vereinsregister eingetragen werden. Nach der Eintragung führt er den Namen swablab e.V. + Der Verein hat seinen Sitz in Freudenstadt. Geschäftsjahr des Vereins ist das Kalenderjahr. = Zweck Der Verein ist demokratisch, parteipolitisch neutral, überkonfessionell und unabhängig. Er ist zur Zusammenarbeit mit allen Organisationen befugt, wenn dies dem Vereinszweck dient und sein Bestehen sowie seine Neutralität nicht gefährdet. Der Zweck des Vereins ist: #block[ #set enum(numbering: "1.", indent: 2em) + Die Förderung der Volksbildung, welche durch Bildungs- und Fortbildungsmaßnahmen sowie durch interdisziplinären Wissensaustausch durchgeführt wird + Die Bewahrung und Entfaltung handwerklicher, kultureller und sozialer Fähigkeiten + Die Förderung einer umwelt- und sozialverträglichen Lebens- und Wirtschaftsweise ] Diese Satzungszwecke werden verwirklicht durch: #block[ #set enum(numbering: "1.", indent: 2em) + Die Einrichtung einer offenen Werkstatt für die Mitglieder und jeden ernstlich Interessierten, als Ort zum Erfahrungsaustausch, Experimentieren und anwendungsorientierten Erlernen von Fertigkeiten + Das Durchführen von Vorträgen, Workshops, Diskussions- und Informationsveranstaltungen + Öffentlichkeitsarbeit in allen Medien ] #pagebreak() = Gemeinnützigkeit + Der Verein verfolgt ausschließlich und unmittelbar gemeinnützige Zwecke im Sinn des Abschnitts"Steuerbegünstigte Zwecke\" der Abgabenordnung in der jeweils gültigen Fassung.\ + Der Verein ist selbstlos tätig; er verfolgt nicht in erster Linie eigenwirtschaftliche Zwecke.\ + Alle Vereinsämter werden ehrenamtlich ausgeübt. = Mittel des Vereins + Mittel des Vereins dürfen nur für die satzungsgemäßen Zwecke verwendet werden. Die Mitglieder erhalten keine Zuwendungen aus Mitteln des Vereins. Es darf keine Person durch Ausgaben, die dem Zweck des Vereins fremd sind, oder durch unverhältnismäßig hohe Vergütungen begünstigt werden. + Der Verein erhebt einen Beitrag. Das Nähere regelt eine Beitragsordnung, die von der Mitgliederversammlung beschlossen wird. + Bei Erwerbsminderung kann ein verminderter Beitragssatz gemäß Beitragsordnung festgesetzt werden. Hierüber entscheidet der Vorstand. #pagebreak() #title_group("Mitgliedschaft") = Eintritt und Austritt der Mitglieder + Ordentliche Mitglieder können natürliche Personen über 18 Jahren werden. Außerdem können juristische Personen, Handelsgesellschaften, nicht rechtsfähige Vereine sowie Anstalten und Körperschaften des öffentlichen Rechts ordentliche Mitglieder werden. + Natürliche Personen können ab dem 12. Lebensjahr jugendliche Mitglieder werden. Hierfür ist die Zustimmung eines gesetzlichen Vertreters notwendig. Jugendliche Mitglieder sind zur Teilnahme an den Mitgliederversammlungen ohne Antrags- und Stimmrecht berechtigt. Mit erreichen des 18. Lebensjahres werden jugendliche Mitglieder zu ordentlichen Mitgliedern. + Die Beitrittserklärung erfolgt schriftlich oder per E-Mail gegenüber dem Vorstand. + Über die Annahme der Beitrittserklärung entscheidet der Vorstand. Die Mitgliedschaft beginnt mit der Annahme der Beitrittserklärung. 
+ Die Mitgliedschaft endet durch Austrittserklärung, durch Tod von natürlichen Personen oder durch Auflösung und Erlöschen von juristischen Personen, Handelsgesellschaften, nicht rechtsfähigen Vereinen sowie Anstalten und Körperschaften des öffentlichen Rechts oder durch Ausschluss; die Beitragspflicht für den laufenden Monat bleibt hiervon unberührt. + Der Austritt wird durch schriftliche Willenserklärung mit einer Frist von mindestens einem Monat zum Monatsende gegenüber dem Vorstand vollzogen. + Die Mitgliederversammlung kann solche Personen, die sich besondere Verdienste um den Verein oder um die von ihm verfolgten satzungsgemäßen Zwecke erworben haben, zu Ehrenmitgliedern ernennen. Ehrenmitglieder haben alle Rechte eines ordentlichen Mitglieds. Sie sind von Beitragsleistungen befreit. + Fördermitglieder unterstützen den Verein ideell und finanziell. Die Fördermitgliedschaft wird vom Vorstand auf Antrag vergeben. Die fördernde Mitgliedschaft berechtigt zur Teilnahme an den Mitgliederversammlungen ohne Antrags- und Stimmrecht. Fördermitglieder entrichten einen Mitgliedsbeitrag, näheres ist in der Beitragsordnung geregelt. + Ein Mitglied kann durch Beschluss des Vorstandes ausgeschlossen werden, wenn es das Ansehen des Vereins schädigt, seinen Beitragsverpflichtungen nachhaltig nicht nachkommt oder wenn ein sonstiger wichtiger Grund vorliegt. Der Vorstand muss dem auszuschließenden Mitglied den Beschluss in schriftlicher Form unter Angabe von Gründen mitteilen und ihm auf Verlangen eine Anhörung gewähren. + Gegen den Beschluss des Vorstandes ist innerhalb einer Frist von zwei Monaten nach Zugang des Ausschließungsbeschlusses die Anrufung der Mitgliederversammlung zulässig. Bis zum Beschluss der Mitgliederversammlung ruht die Mitgliedschaft. Die Mitgliederversammlung entscheidet endgültig über den Ausschluss. = Rechte und Pflichten der Mitglieder + Die Mitglieder sind berechtigt, die Leistungen des Vereins in Anspruch zu nehmen. + Die Mitglieder sind verpflichtet, die satzungsgemäßen Zwecke des Vereins zu unterstützen und zu fördern. Sie sind verpflichtet, die festgesetzten Beiträge zu zahlen. #pagebreak() #title_group("Verein") = Organe des Vereins Die Organe des Vereins sind: #block[ #set enum(numbering: "1.", indent: 2em) + Vorstand + Mitgliederversammlung ] = Der Vorstand + Der Vorstand besteht aus mindestens zwei und höchstens fünf Personen. + Jedes Vorstandsmitglied ist einzelvertretungsberechtigt im Sinne von § 26 BGB bei Rechtsgeschäften bis zu einem Höchstbetrag von 500 EURO. + Bei Rechtsgeschäften über 500 EURO ist die Vertretung durch zwei Vorstandsmitglieder erforderlich. + Über die interne Aufgabenverteilung entscheidet der Vorstand und gibt diese der Mitgliederversammlung bekannt. + Der Vorstand wird von der Mitgliederversammlung auf die Dauer von zwei Jahren, vom Tage der Wahl an gerechnet, gewählt. Die jeweils amtierenden Vorstandsmitglieder bleiben nach Ablauf ihrer Amtszeit solange im Amt, bis ihre Nachfolger gewählt sind und ihr Amt antreten können. Jedes Vorstandsmitglied ist einzeln zu wählen. Wählbar sind nur ordentliche Vereinsmitglieder. Fällt mit dem Ausscheiden eines Vorstandsmitgliedes die Anzahl der Vorstandsmitglieder unter zwei Personen, so ist unverzüglich eine außerordentliche Mitgliederversammlung einzuberufen, bei der mindestens ein neues Vorstandsmitglied gewählt werden muss. + Der Vorstand fasst seine Beschlüsse im Allgemeinen in Vorstandssitzungen. 
Board meetings are convened in writing or by e-mail and take place at least quarterly. In any case, a convocation period of one week must be observed. Notification of an agenda is not required. The board has a quorum if at least two board members are present. Resolutions are passed by the majority of the valid votes cast. In the event of a tie, the motion is deemed rejected.

The resolutions of the board are to be recorded in writing for evidentiary purposes. The minutes shall state the place and time of the board meeting, the names of the participants, the resolutions passed, and the voting results.

A board resolution may be passed in writing if all board members declare their consent to the provision to be resolved.

Holding several board offices at once is not permitted.

= The General Assembly

+ Ordinary members have full voting rights in the general assembly.
+ The general assembly is the supreme decision-making body. Its resolutions cover all matters provided for in these bylaws or by law, in particular
  #block[
    #set enum(numbering: "1.", indent: 2em)
    + the approval of the financial report,
    + the discharge of the board,
    + the election and removal of the board members,
    + the appointment of financial auditors, who may not belong to the board but need not necessarily be members of the association,
    + amendments to the bylaws,
    + the approval of the fee schedule,
    + the approval of the budget,
    + the guideline on the reimbursement of travel costs and expenses,
    + resolutions on motions of the board and the members,
    + the appointment of honorary members,
    + the expulsion of members,
    + the dissolution of the association and the resolution on the possible continuation of the dissolved association.
  ]
+ The ordinary general assembly takes place once a year. Extraordinary general assemblies are held by resolution of the board if the interests of the association so require, or if at least 10% of the members request this in writing, stating the purpose and the reasons. The board must then hold the general assembly within a period of six weeks.
+ The general assembly is convened in writing or by e-mail by a board member with a notice period of at least one week. The agenda must be announced and the necessary information made available. Motions for the agenda must be submitted to the board at least 2 days before the general assembly. The general assembly decides on the handling of initiative motions.
+ Every general assembly convened in accordance with the bylaws has a quorum if 25% of all members are present. If a convened general assembly lacks a quorum, the board is entitled to convene a second assembly with the same agenda, which has a quorum regardless of the number of members present.
+ Resolutions on amendments to the bylaws and on the dissolution of the association can only be passed in a general assembly in which these agenda items were expressly announced at least two weeks before the general assembly. To be legally effective, such resolutions require a three-quarters majority of the members present.
+ Subject to paragraph 6, the resolutions of a general assembly require a simple majority of the votes of the members present.
+ Each member has one vote. Legal entities must appoint a person entitled to vote in writing.
+ Each ordinary member may be represented by another ordinary member who is present. In addition to its own vote, each ordinary member present may take over the vote of at most one further ordinary member by proxy. The proxy must be in writing and must be handed to the chair of the assembly. A restriction of the proxy by the member granting it is not possible.
+ The general assembly is chaired by a chair appointed by the board.
+ At the request of a member, voting shall be by secret ballot. Minutes of the resolutions of the general assembly must be prepared and signed by the chair of the assembly and the secretary. The minutes must be made accessible to all members.

#pagebreak()

= Dissolution of the Association and Beneficiaries

+ The dissolution of the association can only be resolved in a general assembly with the majority of votes specified above.
+ The general assembly appoints at least two liquidators who are jointly authorized to represent the association. These provisions apply accordingly if the association is dissolved for another reason or loses its legal capacity.
+ Upon dissolution of the association or upon discontinuation of its tax-privileged purposes, the assets of the association fall to Lebenshilfe Horb-Sulz e.V., with its registered office in Horb am Neckar, which must use them directly and exclusively for charitable or benevolent purposes. Should this organization not or no longer be charitable at the time of the dissolution of the association, the assets of the association fall to another tax-privileged corporation, to be determined by the general assembly, which must use the assets to promote social welfare.

= Liability of the Association

+ The liability of the association arising from any legal transaction of its organs and its representatives is in all cases limited to the existing assets of the association. Any personal liability of the individual members or organs beyond this is excluded.

#pagebreak()

#title_group("Founding Members")

#block[
  #set enum(numbering: it => "")
  + <NAME> (signature): #box(width: 1fr, repeat("_"))
  + <NAME> (signature): #box(width: 1fr, repeat("_"))
  + <NAME> (signature): #box(width: 1fr, repeat("_"))
  + <NAME> (signature): #box(width: 1fr, repeat("_"))
  + <NAME> (signature): #box(width: 1fr, repeat("_"))
  + <NAME> (signature): #box(width: 1fr, repeat("_"))
  + <NAME> (signature): #box(width: 1fr, repeat("_"))
  + <NAME> (signature): #box(width: 1fr, repeat("_"))
]
https://github.com/kazewong/lecture-notes
https://raw.githubusercontent.com/kazewong/lecture-notes/main/Engineering/SoftwareEngineeringForDataScience/lab/backend.typ
typst
#set page(
  paper: "us-letter",
  header: align(center, text(17pt)[
    *Building an API server*
  ]),
  numbering: "1",
)

#import "./style.typ": style_template
#show: doc => style_template(doc,)

= Foreword

In my experience, collecting and interfacing with data is usually by far the most difficult part of a project, and the single most significant factor that determines whether a downstream application will work or not.

= Building a minimal server with Flask

== RESTful API

== Building routes

== Environment variables

= Exporting neural network models

= Integrating docker service
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/ops-12.typ
typst
Other
// Error: 3-6 cannot mutate a constant: box #(box = 1)
https://github.com/maucejo/presentation_polylux
https://raw.githubusercontent.com/maucejo/presentation_polylux/main/src/presentation-template.typ
typst
MIT License
#import "@preview/polylux:0.3.1": * #import "_boxes.typ": * #import "_slides.typ": * #let presentation( aspect-ratio: "16-9", title: [Title], short-title: "", author: none, laboratory: "", lang: "fr", logo: image("resources/assets/logo_cnam_lmssc.png"), footer-logo: image("resources/assets/lecnam.png"), font: "Lato", math-font: "Lete Sans Math", body ) = { set text(font: font, weight: config.weight, size: config.text-size, number-type: "lining", lang: lang) set strong(delta: 200) set par(justify: true) set page( paper: "presentation-" + aspect-ratio, margin: 0em, header: none, footer: none, fill: colors.gray.lighten(95%), ) // localization let localization = json("resources/i18n/fr.json") if lang == "en" { localization = json("resources/i18n/en.json") } show math.equation: set text(font: math-font, weight: config.weight, stylistic-set: 1) set list(marker: ([#text(fill:colors.red)[#sym.bullet]], [#text(fill:colors.red)[#sym.triangle.filled.small.r]])) set enum(numbering: n => text(fill:colors.red)[#n.]) states.mlogo.update(logo) states.flogo.update(footer-logo) states.title.update(title) states.stitle.update(short-title) states.author.update(author) states.labo.update(laboratory) states.localization.update(localization) body }
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/closure-18.typ
typst
Other
// Error: 10-15 expected identifier, found string #let foo("key": b) = key
https://github.com/Area-53-Robotics/53B-Notebook-Over-Under-2023-2024
https://raw.githubusercontent.com/Area-53-Robotics/53B-Notebook-Over-Under-2023-2024/master/templates/competition.typ
typst
Creative Commons Attribution Share Alike 4.0 International
// FROM NOTEBOOKINATOR /// A Series of tables displaying match data from a tournament. Useful for tournament analysis entries. /// /// - match (string): The name of the match /// - red_alliance (dictionary): The red alliance /// - blue_alliance (dictionary): The blue alliance /// - won (boolean): Whether you won the match /// - auton (boolean): Whether you got the autonomous bonus /// - awp (boolean): Whether you scored the autonomous win point /// - notes (content): Any additional notes you have about the match /// -> content #let tournament(matches: (( match: "", red_alliance: (teams: ("", ""), score: 0), blue_alliance: (teams: ("", ""), score: 0), won: false, auton: false, awp: false, notes: [], ),)) = { for match in matches { let color = if match.won { green } else { red } let cell = rect.with(fill: color.lighten(80%), width: 100%, height: 30pt) let header_cell = cell.with(fill: color, height: 20pt) let alliance_info(alliance: none) = { cell[ #grid(columns: (1fr, 1fr), [ #alliance.teams.at(0) \ #alliance.teams.at(1) \ ], [ #set text(size: 15pt) #set align(horizon + center) #alliance.score ]) ] } grid( columns: (1fr, 1fr, 1fr), header_cell(radius: (top-left: 1.5pt))[*Match*], header_cell[*Red Alliance*], header_cell[*Blue Alliance*], cell[#match.match], alliance_info(alliance: match.red_alliance), alliance_info(alliance: match.blue_alliance), ) if not match.at("notes", default: none) == none [ === Notes #match.notes ] else [ ] } }
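A minimal usage sketch for this component (the import path and all match data below are made-up assumptions):

```typ
#import "/templates/competition.typ": tournament

#tournament(matches: ((
  match: "Qualification 12",
  red_alliance: (teams: ("53B", "53A"), score: 112),
  blue_alliance: (teams: ("1010X", "2011Y"), score: 98),
  won: true,
  auton: true,
  awp: false,
  notes: [Autonomous was consistent; defense held up late in the match.],
),))
```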
https://github.com/rabotaem-incorporated/algebra-conspect-1course
https://raw.githubusercontent.com/rabotaem-incorporated/algebra-conspect-1course/master/sections/01-number-theory/05-gcd.typ
typst
Other
#import "../../utils/core.typ": * == Наибольший общий делитель #def[ $R$ --- коммутативное кольцо, $a, b in R$. Элемент $d$ называется наибольшим общим делителем, если: + $d divides a, space d divides b$ + $d' divides a, space d' divides b ==> d' divides d$ ] #pr[ + $d_1, space d_2$ --- наибольшие общие делители, тогда $d_1 sim d_2$. + Пусть $d_1$ --- наибольший общий делитель, $d_2 sim d_1$, тогда $d_2$ --- тоже наибольший общий делитель. ] #proof[ + По свойству 2 : $d_1 divides d_2, space d_2 divides d_1 ==> d_1 sim d_2$. + $d_2 divides d_1, space d_1 divides a, space d_1 divides b ==> d_2 divides a, space d_2 divides b$ Пусть $d_2$ не наибольший, тогда $exists d' > d_2$. $d' divides a, space d' divides b ==> d' divides d_1$, так как $d_1$ наибольший общий делитель, $d' divides d_1, space d_1 divides d_2 ==> d' divides d_2$, противоречие, так как $d' > d_2$. ] #pr[ Пусть $a, b in ZZ ==>$ + $exists d in ZZ: space a ZZ + b ZZ = d ZZ$, иначе говоря: $forall x, y in ZZ space exists d, z in ZZ: a x + b y = d z$ + при этом $d$ --- наибольший общий делитель $a, b$. ] #proof[ + Пусть $I = a ZZ + b ZZ$. Заметим что $0 in I$, так как $0a + 0b = 0$. Если $I = {0}$, то $I = 0 ZZ$. Иначе $I eq.not {0} ==> c in I ==> -c in I$, так как $-(a x + b y) = a dot.c -x + b dot.c -y$ То есть в $I$ есть положительные числа. Пусть $d = \min{ c divides c in I, space c > 0 }$, и докажем что $a ZZ + b ZZ = d ZZ$. "$supset$": $d in I$ (по определению) $==> d = a x_0 + b y_0, space.quad x_0, y_0 in ZZ ==>$ $forall z in ZZ: space d z = a(x_0z) + b(y_0z) in I$, значит $d ZZ subset a ZZ + b ZZ$ "$subset$": Пусть $c in I, space d in NN ==> exists q, r in ZZ: space c = d q + r, space.quad 0 <= r < d$ $c in I$, значит $c = a x_1 + b y_1, space.quad x_1, y_1 in ZZ$ Мы уже знаем, что $d in I$, значит $d = a x_0 + b y_0, space.quad x_0, y_0 in ZZ$ $r = c - d q = a(x_1 -x_0q) + b(y_1 - y_0q) in I$ По определению остатка: $ cases( r >= 0, r < d ) $ но $d = \min{ c divides c in I, space c > 0 } ==>$ $ cases( r = 0, r = c - d q ) ==> c = d q ==> c in d ZZ ==> a ZZ + b ZZ subset d ZZ $ + Пусть $d$ --- наибольший общий делитель $a, b$. $a = a 1 + b 0 in I = d ZZ ==> d divides a$ $b = a 0 + b 1 in I = d ZZ ==> d divides b$ Пусть $d' divides a, space d' divides b, space d = a x_0 + b y_0$ $d' divides a x_0, space d' divides b y_0 ==> d' divides d$, значит $d$ действительно наибольший общий делитель $a, b$. ] #follow[ + $a, b in ZZ:$ Тогда наибольший общий делитель $a, b$ существует. + Если $d$ --- наибольший общий делитель $a, b$, то $exists x, y in ZZ: space d = a x + b y$ --- _Линейное представление наибольшего общего делителя_. ] #proof[ + Доказали в двух частях предложения. + Из первой части знаем, что существует $d_0$ --- наибольший общий делитель $a, b$, то есть $d_0 = a x_0 + b y_0$ $d$ ассоциирован с $d_0 ==> d = d_0 ZZ, space z in ZZ ==> d = a(x_0 z) + b(y_0 z)$ ] #def[ $"НОД"(a, b) <==> gcd(a, b)$ --- неотрицательный наибольший общий делитель $a, b$. ] #pr[ Пусть $a_1, a_2, b in ZZ: space a_1 equiv_(b) a_2$ Тогда $gcd(a_1, b)$ = $gcd(a_2, b)$. ] #proof[ $(!) 
  space {c: c divides a_1, space c divides b } = {c : c divides a_2, space c divides b}$

  "$subset$": $a_2 - a_1 = b m ==> a_2 = a_1 + b m$

  $c divides a_1, space c divides b ==> c divides a_2$

  "$supset$": $a_1 - a_2 = b m ==> a_1 = a_2 + b m$

  $c divides a_2, space c divides b ==> c divides a_1$

  It follows that:

  $forall x in {c: c divides a_1, c divides b }: space x divides gcd(a_1, b)$

  $forall x in {c: c divides a_2, c divides b }: space x divides gcd(a_2, b)$

  $gcd(a_1, b) = gcd(a_2, b)$
]

#def(name: "<NAME>")[
  $gcd(a, b) = gcd(b, a mod b)$, if $b eq.not 0$

  ```cpp
  int gcd(int a, int b) {
      if (b == 0) return a;
      return gcd(b, a % b);
  }
  ```
]
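For illustration, a short worked example (the numbers are arbitrary): the Euclidean algorithm gives $gcd(48, 18) = gcd(18, 12) = gcd(12, 6) = gcd(6, 0) = 6$, and a corresponding linear representation is $6 = 48 dot.c (-1) + 18 dot.c 3$.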
https://github.com/MasterTemple/typst-bible-plugin
https://raw.githubusercontent.com/MasterTemple/typst-bible-plugin/main/README.typ
typst
#import "bible.typ": bible_footnote, bible_quote, bible_quote_fmt #import "conf.typ": conf #show: doc => conf(doc) #show link: underline // create short-hands #let ul = underline #let fn = footnote #let string(content) = { let content = "\"" + content + "\"" raw(lang: "js", content) } = Typst Bible [insert screenshot here] #pagebreak() = Table of Contents #outline(indent: 2em, title: none) #pagebreak() == Purpose - To easily reference Bible verses for personal, ministerial, or academic papers - ESV is currently only supported translation #footnote[Since I am the only one using it, and I use ESV, I don't see the need to add another translation. If you use this and would like me to add another translation, let me know.] - If you have any great ideas, please open a #link("https://github.com/MasterTemple/typst-bible-plugin/issues", "GitHub Issue") == Import ```js "bible.typ" ``` `bible.typ` is meant to provide an API for interacting with `bible.wasm` ```typ #import "bible.typ": bible_footnote, bible_quote, bible_quote_fmt ``` == ```typ #bible_footnote() ``` ```typ I am blessed because my sins are forgiven! #bible_footnote("Romans 4:7") // or I am blessed because my sins are forgiven! ^ Romans 4:7 // but it is currently broken because there is a bug matching the `:` character ``` I am blessed because my sins are forgiven! #bible_footnote("Romans 4:7") == ```typ #bible_quote() ``` ```typ #bible_quote("Romans 4:7") // or > Romans 4:7 // but it is currently broken because there is a bug matching the `:` character ``` #bible_quote("Romans 4:7") == ```typ #bible_quote_fmt() ``` Note: Regular Expressions are supported and will be discussed in #link(<regex-support>, "RegEx Support"). ==== Basic This is just like using ```typ #bible_quote()``` with no additional formatting applied ```typ #bible_quote_fmt("Ephesians 4:28") ``` #bible_quote_fmt("Ephesians 4:28") ==== Bold #string("b") = bold match pattern (optional) ```typ #bible_quote_fmt("Ephesians 4:28", b: "doing honest work with his own hands") ``` #bible_quote_fmt("Ephesians 4:28", b: "doing honest work with his own hands") ==== Highlight #string("hl") = highlight match pattern (optional) ```typ #bible_quote_fmt("Ephesians 4:28", hl: "doing honest work with his own hands") ``` #bible_quote_fmt("Ephesians 4:28", hl: "doing honest work with his own hands") ==== Underline #string("ul") = underline match pattern (optional) ```typ #bible_quote_fmt("Ephesians 4:28", ul: "doing honest work with his own hands") ``` #bible_quote_fmt("Ephesians 4:28", ul: "doing honest work with his own hands") ==== Italics #string("it") = italics match pattern (optional) ```typ #bible_quote_fmt("Ephesians 4:28", it: "doing honest work with his own hands") ``` #bible_quote_fmt("Ephesians 4:28", it: "doing honest work with his own hands") ==== Custom #string("c") = custom match pattern to apply `fmt` filter (optional)\ #string("fmt") = custom formatting pattern (optional) ```typ #bible_quote_fmt("Ephesians 4:28", c: "doing honest work with his own hands", fmt: highlight.with(fill: red)) ``` #bible_quote_fmt("Ephesians 4:28", c: "doing honest work with his own hands", fmt: highlight.with(fill: red)) ==== Omit/Hide `omit` = omit content by replacing with elipse ... 
```typ #bible_quote_fmt("Ephesians 4:28", omit: "doing honest work with his own hands") ``` #bible_quote_fmt("Ephesians 4:28", omit: "doing honest work with his own hands") === RegEx Support <regex-support> The parameters for matching are Regular Expressions, you can learn more about them at #link("https://typst.app/docs/reference/foundations/regex/"). ==== Removing beginning of quote This is a common operation For reference: #bible_quote_fmt("Ephesians 4:28") You could say: ```typ #bible_quote_fmt("Ephesians 4:28", omit: "Let the thief no longer steal, but") ``` To remove #string("Let the thief no longer steal, but") #bible_quote_fmt("Ephesians 4:28", omit: "Let the thief no longer steal, but") You could also say: ```typ #bible_quote_fmt("Ephesians 4:28", omit: "Let.*?rather") ``` To remove everything (#string(".*?")) between #string("Let") to #string("rather") (which equates to #string("Let the thief no longer steal, but")) #bible_quote_fmt("Ephesians 4:28", omit: "Let.*?rather") Or you could even say: ```typ #bible_quote_fmt("Ephesians 4:28", omit: "^.*?rather") ``` To remove everything (#string(".*?")) from the start of the verse (#string("^")) to the first #string("rather") (which again equates to #string("Let the thief no longer steal, but")) #bible_quote_fmt("Ephesians 4:28", omit: "^.*?rather") Explanation: - #string("^") begins the match at the start of the line - #string(".*") matches anything (#string(".")) as many times as it can (#string("*")) - #string(".*?") is the same way, but it matches as many as necessary - In other words, #string(".*rather") will match everything up to #underline[the last] #string("rather"), but #string(".*?rather") will match everything up until #underline[the first] rather ==== Removing end of quote It is the same thing to remove the end, but you use the #string("$") character: ```typ #bible_quote_fmt("Ephesians 4:28", omit: ", so.*$") ``` #bible_quote_fmt("Ephesians 4:28", omit: ", so.*$") This removes everything (#string(".*")) from #string(", so") to the end of the line #string("$") (or #string(", so that he may have something to share with anyone in need.")). ==== Removing beginning and end of quote ```typ #bible_quote_fmt("Ephesians 4:28", omit: "^.*?rather|, so.*$") ``` #bible_quote_fmt("Ephesians 4:28", omit: "^.*?rather|, so.*$") This does both - #string("^.*?rather") starts at the beginning (#string("^")) and removes everything up to (#string(".*?")) the first #string("rather"). - #string(", so.*$") removes everything (#string(".*")) from #string(", so") to the end of the line (#string("$")) - #string("|") joins the 2 patterns together with a logical `OR` operation, meaning it will do the first pattern or second pattern (and if it can do both, it does both) == Additional Information === Naming I will try and provide clear naming conventions. If you do not prefer that, you can just rename them as follows: ```typ #let v = bible_quote // ... #v("1 John 3:2") ``` == Building WASM To build: ```bash wasm-pack build --target web ``` I use a script that deletes and re-links the file so that Typst knows to re-check the contents: ```bash ./run.sh ```
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/meta/link-09.typ
typst
Other
// Test link to label. Text <hey> #link(<hey>)[Go to text.]
https://github.com/PA055/5839B-Notebook
https://raw.githubusercontent.com/PA055/5839B-Notebook/main/Entries/drivetrain/drive-train-types.typ
typst
#import "/packages.typ": notebookinator #import notebookinator: * #import themes.radial.components: * #show: create-body-entry.with( title: "Drive Train Types", type: "brainstorm", date: datetime(year: 2024, month: 3, day: 17), author: "<NAME>", witness: "<NAME>" ) = New Drive Models There exists a variety of drive models both practicle and impracticle that can be made with the VRC legal parts. It is important to judge where each one can shine to see which is the most practical when the next game releases. A decision can not be made yet for which drive is best, but the strenghts and weakness of each one can be assesed as well as models for the more practical ones generated. These models can give us a head start on the next seasons bot if they prove adequate for the next game as well as allow the team to test various ideas. = Tank Drive A large variety of what can be considered a tank drive or differential drives exist within vex. These work by having two sides where each sides wheels all spin together. This allows for linear motion(Both sides spin the same direction), turning (Both sides spin opposite), and arcing (One side spins slower then the other in the same direction). These drives are often the simpilist and provide a wide range of motion while remaining able to push back against other robots. These drives can also be achieved in a variety of ways with varying numbers and sizes of wheels that augment their performance. 4in wheels provide greater speed as per each rotation the robot moves farther, however they give the robot less torque. Additonally since less of them can fit onto a dirve with the 18*18*18 size limit there is also less points of contact. Additionaly with the older 4in wheels the team currently own the traction versions are .125in smaller then the omni versions 3.25in wheels provide slower speeds, but are able to give the robot more pushing power as they have more torque and points of contact. These wheels are also easier to work with as the traction and omni versions are the same size unlike with the older 4in wheels Omni wheels have rollers that allow the wheel to move side to side as well as forward and back. This makes them great for turning, but poor for traction. Traction wheels wheels are all rubber and provide exceptional ground adherence for any robot, however they greatly limit turning making them impractical unless used as the middle wheel where there effects on truning are midigated. From our teams expirence a 3.25in drive with 2 traction wheels in the middle and 2 omni wheels on either end appears to be the optimal way to execute this drive. Our previous drive with 3 4in omniwheels failed to push back agaisnt other robots that were using 3.25 in tank drives with the same amount of motors. These robots also were just as fast and maneuverable as ours showing little trade off for this design. #pro-con( pros: [ - Simplicity - Versatility - Easier to Control ], cons: [ - Limited Mobility - Wheel incompatabilities ] ) = H/X Drives These use either 4 or 5 omni wheels to achieve a robot that has the same range of motion as a Tank Drive, but with the additon of diagnol and horizantal movements. They either use in the case of an X drive 4 indivudally powered omni wheels in each corner at 90 degress from one another or 4 indivudallty powered onmi wheels in a traditional tank drive setup with one horizantal omni wheel for the H drive. 
These drives can, however, prove difficult to control and, in the case of the H drive, impractical, as the horizontal wheel rarely makes contact. They are also very easy to push around, since all the wheels are omni. X drives can prove highly practical given the right game and design, but in games such as Over Under the middle bar limits their use.

#pro-con(
  pros: [
    - Maneuverability
    - Complex Autonomous
    - Strafing
  ],
  cons: [
    - Mechanical Complexity
    - Motor Usage
    - Practicality
    - Low Traction/Easy to push
  ]
)

= Mecanum Drives

Mecanum drives are likely the most specialized, as they use dedicated mecanum wheels. These, like omni wheels, have rollers attached, but at an angle, to provide uniquely augmented movement. When set up correctly, 4 individually powered mecanum wheels can provide the same movement as an X drive. However, since motion in any direction comes directly from turning the mecanum wheels, they are harder to push, as the motors resist the pushing directly. This, along with other issues, can also lead to faster overheating with mecanum drives. The VEX EDR 4in mecanum wheels are very bulky, putting more strain on the motors. Additionally, the VEX mecanum wheels, unlike most designs, have limited contact with the ground due to the irregular design of their rollers. It is also important to note that it is easier to gear and build a frame for a mecanum drive than for an X drive, as it does not require 45 degree angles to achieve its unique motion.

Though this is also possible with an X drive, an additional powered omni wheel could be put into the middle to provide more drive power. Since this wheel isn't needed at all times, if a successful PTO can be developed it could allow for a very versatile robot and drive.

#pro-con(
  pros: [
    - Maneuverability
    - Complex Autonomous
    - Strafing
  ],
  cons: [
    - Mechanical Complexity
    - Motor Usage
    - Practicality
    - Requires balanced weight
  ]
)

= Swerve Drives

Previously considered impractical for VEX, swerve drives involve either 3 or 4 independently steered and powered wheels. These are built around modules that can both rotate the orientation of the wheels and spin them. This allows the robot to turn rapidly as well as turn while moving. The wheels can be positioned in the manner of a traditional tank drive for linear movement and then turned to go in the desired direction. However, until the addition of the 5.5W motors, these would use either 6 or all 8 of the robot's available motors. The 5.5W motors now make this drive potentially practical, as a 3 wheel swerve drive could be made from 3 11W motors and 3 5.5W motors, allowing 38.5W of motors to be allocated to the robot's mechanisms and manipulators.

The advantages of swerve drives can be seen in other competitions like FRC, where they are often used to great success to create highly maneuverable bots. The use of one within VEX would be highly dependent on the game, as one with limited room to move, such as Over Under, takes away many of a swerve drive's advantages. It is worth creating a model for a swerve drive module in case the next game is one that prioritizes movement. It would also provide practice with more complex gearing, which the team has yet to experiment with.

#pro-con(
  pros: [
    - Maneuverability
    - Complex Autonomous
  ],
  cons: [
    - Mechanical Complexity
    - Motor Usage
    - Practicality
  ]
)
https://github.com/01mf02/jq-lang-spec
https://raw.githubusercontent.com/01mf02/jq-lang-spec/main/json.typ
typst
#import "common.typ": * = JSON values <json> In this section, we will define JSON values. Furthermore, we will define several functions and operations on values. A JSON value $v$ has the shape $ v := "null" #or_ "false" #or_ "true" #or_ n #or_ s #or_ [v_0, ..., v_n] #or_ {k_0 |-> v_0, ..., k_n |-> v_n}, $ where $n$ is a number and $s$ is a string. We write a string $s$ as $c_0...c_n$, where $c$ is a character. A value of the shape $[v_0, ..., v_n]$ is called an _array_ and a value of the shape ${k_0 |-> v_0, ..., k_n |-> v_n}$ is an unordered map from _keys_ $k$ to values that we call an _object_.#footnote[ The JSON syntax uses ${k_0: v_0, ..., k_n: v_n}$ instead of ${k_0 |-> v_0, ..., k_n |-> v_n}$. However, in this text, we use the ${k_0: v_0, ..., k_n: v_n}$ syntax to denote the _construction_ of objects, and use ${k_0 |-> v_0, ..., k_n |-> v_n}$ syntax to denote actual objects. ] In JSON, object keys are strings.#footnote[ YAML is a data format similar to JSON. While YAML can encode any JSON value, it additionally allows any YAML values to be used as object keys, where JSON allows only strings to be used as object keys. This text deliberately distinguishes between object keys and strings. That way, extending the given semantics to use YAML values should be relatively easy. ] We assume that the union of two objects is _right-biased_; i.e., if we have two objects $l$ and $r = {k |-> v, ...}$, then $(l union r)(k) = v$ (regardless of what $l(k)$ might yield). By convention, we will write in the remainder of this section $c$ for characters and $k$ for object keys. We will sometimes write arrays as $[v_0, ..., v_n]$ and sometimes as $[v_1, ..., v_n]$: The former case is useful to express that $n$ is the maximal index of the array (having length $n+1$), and the latter case is useful to express that the array has length $n$. The same idea applies also to strings, objects, and streams. A number can be an integer or a decimal, optionally followed by an integer exponent. For example, $0$, $-42$, $3.14$, $3 times 10^8$ are valid JSON numbers. This text does not fix how numbers are to be represented, just like the JSON standard does not impose any representation.#footnote[ jq uses floating-point numbers to encode both integers and decimals. However, several operations in this text (for example those in @json-access) make only sense for natural numbers $NN$ or integers $ZZ$. In situations where integer values are expected and a number $n$ is provided, jq generally substitutes $n$ by $floor(n)$ if $n >= 0$ and $ceil(n)$ if $n < 0$. For example, accessing the $0.5$-th element of an array yields its $0$-th element. In this text, we use do not document this rounding behaviour for each function. ] Instead, it just assumes that the type of numbers has a total order (see @json-order) and supports the arithmetic operations $+$, $-$, $times$, $div$, and $mod$ (modulo). == Construction <json-construction> In this subsection, we will introduce operators to construct arrays and objects. The function $[dot]$ transforms a stream into an array if all stream elements are values, or into the first exception in the stream otherwise: $ [stream(v_0, ..., v_n)] := cases( v_i & "if" v_i "is an exception and for all" j < i", " v_j "is a value", [v_0, ..., v_n] & "otherwise", ) $ Given two values $k$ and $v$, we can make an object out of them: $ {k: v} := cases( {k |-> v} & "if" k "is a string and" v "is a value", "error" & "otherwise", ) $ We can construct objects with multiple keys by adding objects, see @arithmetic. 
== Simple functions <simple-fns> We are now going to define several functions that take a value and return a value. The _keys_ of a value are defined as follows: $ "keys"(v) := cases( stream(0 , ..., n) & "if" v = [v_0, ..., v_n], stream(k_0) + "keys"(v') & "if" v = {k_0 |-> v_0} union v' "and" k_0 = min("dom"(v)), stream() & "if" v = {}, stream("error") & "otherwise", ) $ For an object $v$, $"keys"(v)$ returns the domain of the object sorted by ascending order. For the used ordering, see @json-order. We define the _length_ of a value as follows: $ |v| := cases( 0 & "if" v = "null", |n| & "if" v "is a number" n, n & "if" v = c_1...c_n, n & "if" v = [v_1, ..., v_n], n & "if" v = {k_1 |-> v_1, ..., k_n |-> v_n}, "error" & "otherwise (if" v in {"true", "false"}")", ) $ The _boolean value_ of a value $v$ is defined as follows: $ "bool"(v) := cases( "false" & "if" v = "null" "or" v = "false", "true" & "otherwise", ) $ We can draw a link between the functions here and jq: When called with the input value $v$, the jq filter `keys` yields $stream(["keys"(v)])$, the jq filter `length` yields $stream(|v|)$, and the jq filter `true and .` yields $stream("bool"(v))$. == Arithmetic operations <arithmetic> We will now define a set of arithmetic operations on values. We will link these later directly to their counterparts in jq: Suppose that the jq filters `f` and `g` yield $stream(l)$ and $stream(r)$, respectively. Then the jq filters `f + g`, `f - g`, `f * g`, `f / g`, and `f % g` yield $stream(l + r)$, $stream(l - r)$, $stream(l times r)$, $stream(l div r)$, and $stream(l mod r)$, respectively. === Addition We define addition of two values $l$ and $r$ as follows: $ l + r := cases( v & "if" l = "null" "and" r = v", or" l = v "and" r = "null", n_1 + n_2 & "if" l "is a number" n_1 "and" r "is a number" n_2, c_(l,1)...c_(l,m)c_(r,1)...c_(r,n) & "if" l = c_(l,1)...c_(l,m) "and" r = c_(r,1)...c_(r,n), [stream(l_1, ..., l_m, r_1, ..., r_n)] & "if" l = [l_1, ..., l_m] "and" r = [r_1, ..., r_n], l union r & "if" l = {...} "and" r = {...}, "error" & "otherwise", ) $ Here, we can see that $"null"$ serves as a neutral element for addition. For strings and arrays, addition corresponds to their concatenation, and for objects, it corresponds to their union. === Multiplication #let merge = $union.double$ Given two objects $l$ and $r$, we define their _recursive merge_ $l merge r$ as: $ l merge r := cases( {k |-> v_l merge v_r} union l' merge r' & "if" l = {k |-> v_l} union l'"," r = {k |-> v_r} union r'", and" v_l"," v_r "are objects", {k |-> v_r} union l' merge r' & "if" l = {k |-> v_l} union l'"," r = {k |-> v_r} union r'", and" v_l "or" v_r "is not an object", {k |-> v_r} union l merge r' & "if" k in.not "dom"(l) "and" r = {k |-> v_r} union r', l & "otherwise (if" r = {} ")", ) $ We use this in the following definition of multiplication of two values $l$ and $r$: $ l times r := cases( n_1 times n_2 & "if" l "is a number" n_1 "and" r "is a number" n_2, l + l times (r - 1) & "if" l "is a string and" r in NN without {0}, "null" & "if" l "is a string and" r = 0, r times l & "if" r "is a string and" l in NN, l merge r & "if" l "and" r "are objects", "error" & "otherwise" ) $ We can see that multiplication of a string $s$ with a natural number $n > 0$ returns $sum_(i = 1)^n s$; that is, the concatenation of $n$ times the string $s$. The multiplication of two objects corresponds to their recursive merge as defined above. 
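The following example illustrates the multiplication of two objects:

#example[
  Let $l = {qs("k") |-> {qs("a") |-> 1}, qs("m") |-> 2}$ and $r = {qs("k") |-> {qs("b") |-> 3}, qs("m") |-> 4}$.
  Then $l times r = l merge r = {qs("k") |-> {qs("a") |-> 1, qs("b") |-> 3}, qs("m") |-> 4}$:
  at $qs("k")$, both values are objects, so they are merged recursively,
  whereas at $qs("m")$, the right value replaces the left one.
]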
=== Subtraction We now define subtraction of two values $l$ and $r$: $ l - r := cases( n_1 - n_2 & "if" l "is a number" n_1 "and" r "is a number" n_2, [sum_(i, l_i in {r_0, ..., r_n}) stream(l_i) ] & "if" l = [l_0, ..., l_n] "and" r = [r_0, ..., r_n], "error" & "otherwise" ) $ When both $l$ and $r$ are arrays, then $l - r$ returns an array containing those values of $l$ that are not contained in $r$. === Division We will now define a function that splits a string $y + x$ by some non-empty separator string $s$. The function preserves the invariant that $y$ does not contain $s$: $ "split"(x, s, y) := cases( "split"(c_1...c_n, s, y + c_0) & "if" x = c_0...c_n "and" c_0...c_(|s| - 1) != s, [stream(y)] + "split"(c_(|s|)...c_n, s, qs("")) & "if" x = c_0...c_n "and" c_0...c_(|s| - 1) = s, [stream(y)] & "otherwise" (|x| = 0), ) $ We use this splitting function to define division of two values: $ l div r := cases( n_1 div n_2 & "if" l "is a number" n_1 "and" r "is a number" n_2, [] & "if" l "and" r "are strings and " |l| = 0, [sum_i stream(c_i)] & "if" l = c_0...c_n "," r "is a string," |l| > 0", and" |r| = 0, "split"(l, r, qs("")) & "if" l "and" r "are strings," |l| > 0", and" |r| > 0, "error" & "otherwise" ) $ #example[ Let $s = qs("ab")$. We have that $s div s = [qs(""), qs("")]$. Furthermore, $qs("c") div s = [qs("c")]$, $(s + qs("c") + s ) div s = [qs(""), qs("c"), qs("") ]$ and $(s + qs("c") + s + qs("de")) div s = [qs(""), qs("c"), qs("de")]$. ] From this example, we can infer the following lemma. #lemma[ Let $l$ and $r$ strings with $|l| > 0$ and $|r| > 0$. Then $l div r = [l_0, ..., l_n]$ for some $n > 0$ such that $l = (sum_(i = 0)^(n - 1) (l_i + r)) + l_n$ and for all $i$, $l_i$ is a string that does not contain $r$ as substring. ] === Remainder For two values $l$ and $r$, the arithmetic operation $l mod r$ (modulo) yields $m mod n$ if $l$ and $r$ are numbers $m$ and $n$, otherwise it yields an error. == Accessing <json-access> We will now define _access operators_. These serve to extract values that are contained within other values. The value $v[i]$ of a value $v$ at index $i$ is defined as follows: $ v[i] := cases( v_i & "if" v = [v_0, ..., v_n] "," i in NN", and" i <= n, "null" & "if" v = [v_0, ..., v_n] "," i in NN", and" i > n, v[n+i] & "if" v = [v_0, ..., v_n] "," i in ZZ without NN", and" 0 <= n+i, v_j & "if" v = {k_0 |-> v_0, ..., k_n |-> v_n}"," i "is a string, and" k_j = i, "null" & "if" v = {k_0 |-> v_0, ..., k_n |-> v_n}"," i "is a string, and" i in.not {k_0, ..., k_n}, "error" & "otherwise", ) $ The idea behind this index operator is as follows: It returns $"null"$ if the value $v$ does not contain a value at index $i$, but $v$ could be _extended_ to contain one. More formally, $v[i]$ is $"null"$ if $v != "null"$ and there exists some value $v' = v + delta$ such that $v'[i] != "null"$. The behaviour of this operator for $i < 0$ is that $v[i]$ equals $v[abs(v) + i]$. #example[ If $v = [0, 1, 2]$, then $v[1] = 1$ and $v[-1] = v[3 - 1] = 2$. ] Using the index operator, we can define the values $v[]$ in a value $v$ as follows: $ v[] := sum_(i in"keys"(v)) stream(v[i]) $ When provided with an array $v = [v_0, ..., v_n]$ or an object $v = {k_0 |-> v_0, ..., k_n |-> v_n}$ (where $k_0 < ... < k_n$), $v[]$ returns the stream $stream(v_0, ..., v_n)$. 
Next, we define a slice operator: $ v[i:j] := cases( [sum_(k = i)^(j-1) stream(v_k)] & "if" v = [v_0, ..., v_n] "and" i","j in NN, sum_(k = i)^(j-1) c_k & "if" v = c_0...c_n "and" i","j in NN, v[(n+i):j] & "if" |v| = n", " i in ZZ without NN", and" 0 <= n+i, v[i:(n+j)] & "if" |v| = n", " j in ZZ without NN", and" 0 <= n+j, "error" & "otherwise", ) $ Note that unlike $v[]$ and $v[i]$, $v[i:j]$ may yield a value if $v$ is a string. If we have that $i, j in NN$ and either $i > n$ or $i >= j$, then $v[i:j]$ yields an empty array if $v$ is an array, and an empty string if $v$ is a string. #example[ If $v = [0, 1, 2, 3]$, then $v[1:3] = [1, 2]$. ] @value-ops demands all access operators to yield a _stream_ of value results, yet only $v[]$ fulfills this, whereas $v[i]$ and $v[i:j]$ return a single value result. For that reason, we now redefine these operators to return a stream of value results, by $ v[i] &:= stream(v[i]) \ v[i:j] &:= stream(v[i:j]) $ Finally, we define the remaining access operators by using the slice operator: $ v[:j] &:= v[0: &j] \ v[i:] &:= v[i:&|v|] $ When $|v|$ yields an error, then $v[i:]$ yields an error, too. == Updating <json-update> For each access operator in @json-access, we will now define an _updating_ counterpart. Intuitively, where an access operator yields some elements contained in a value $v$, its corresponding update operator _replaces_ these elements in $v$ by the output of a function. The access operators will be used in @semantics, and the update operators will be used in @updates. All update operators take at least a value $v$ and a function $f$ from a value to a stream of value results. We extend the domain of $f$ to value results such that $f(e) = stream(e)$ if $e$ is an exception. The first update operator will be a counterpart to $v[]$. For all elements $x$ that are yielded by $v[]$, $v[] update f$ replaces $x$ by $f(x)$: $ v[] update f := cases( [sum_i f(v_i)] & "if" v = [v_0, ..., v_n], union.big_i cases({k_i : h} & "if" f(v_i) = stream(h) + t, {} & "otherwise") & "if" v = {k_0 |-> v_0, ..., k_n |-> v_n}, "error" & "otherwise", ) $ For an input array $v = [v_0, ..., v_n]$, $v[] update f$ replaces each $v_i$ by the output of $f(v_i)$, yielding $[f(v_0) + ... + f(v_n)]$. For an input object $v = {k_0 |-> v_0, ..., k_n |-> v_n}$, $v[] update f$ replaces each $v_i$ by the first output yielded by $f(v_i)$ if such an output exists, otherwise it deletes ${k_i |-> v_i}$ from the object. Note that updating arrays diverges from jq, because jq only considers the first value yielded by $f$. 
For the next operators, we will use the following function $"head"(l, e)$,
which returns the head of a list $l$ if it is not empty, otherwise $e$:
$ "head"(l, e) := cases(
  h & "if" l = stream(h) + t,
  e & "otherwise",
) $

The next function takes a value $v$ and replaces its $i$-th element
by the first output of $f$, or deletes it if $f$ yields no output:
$ v[i] update f := cases(
  v[0:i] + ["head"(f(v[i]), stream())] + v[(i+1):n] & "if" v = [v_0, ..., v_n]", " i in NN", and" i <= n,
  /*
  v[0:i] + [h] + v[(i+1):n] & "if" v = [v_0, ..., v_n]", " i in NN"," i <= n", and" f(v[i]) = stream(h) + t,
  v[0:i] + v[(i+1):n] & "if" v = [v_0, ..., v_n]", " i in NN"," i <= n", and" f(v[i]) = stream(),
  */
  v[n+i] update f & "if" v = [v_0, ..., v_n]", " i in ZZ without NN", and" 0 <= n+i,
  v + {i: h} & "if" v = {...} "and" f(v[i]) = stream(h) + t,
  union.big_(k in "dom"(v) without {i}) {k |-> v[k]} & "if" v = {...} "and" f(v[i]) = stream(),
  "error" & "otherwise",
) $
Note that this diverges from jq if $v = [v_0, ..., v_n]$ and $i > n$,
because jq fills up the array with $"null"$.
// but we unfortunately cannot use it to define {k: f}, because if f returns the empty list,
// we cannot provide a default element e that would make the key disappear

The final function here is the update counterpart of the operator $v[i:j]$.
It replaces the slice $v[i:j]$
by the first output of $f$ on $v[i:j]$, or
by the empty array if $f$ yields no output.
$ v[i:j] update f := cases(
  v[0:i] + "head"(f(v[i:j]), []) + v[j:n] & "if" v = [v_0, ..., v_n]", " i","j in NN", and" i <= j,
  v & "if" v = [v_0, ..., v_n]", " i","j in NN", and" i > j,
  v[(n+i):j] update f & "if" |v| = n", " i in ZZ without NN", and" 0 <= n+i,
  v[i:(n+j)] update f & "if" |v| = n", " j in ZZ without NN", and" 0 <= n+j,
  "error" & "otherwise",
) $
Unlike its corresponding access operator $v[i:j]$,
this operator unconditionally fails when $v$ is a string.
This operator diverges from jq if $f$ yields $"null"$,
in which case jq returns an error,
whereas this operator treats this as equivalent to $f$ returning $[]$.

#example[
  If $v = [0, 1, 2, 3]$ and $f(v) = [4, 5, 6]$, then $v[1:3] update f = [0, 4, 5, 6, 3]$.
]

Similarly to @json-access, we define the remaining operators by $v[i:j]$:
$ v[:j] update f &:= v[0: &j] update f \
  v[i:] update f &:= v[i:&|v|] update f $

== Order <json-order>

In this subsection, we establish a total order on values.#footnote[
  Note that jq does _not_ implement a _strict_ total order on values;
  in particular, its order on (floating-point) numbers specifies $"nan" < "nan"$,
  from which it follows that $"nan" != "nan"$ and $"nan" gt.not "nan"$.
]
We have that
$ "null" < "false" < "true" < n < s < a < o, $
where $n$ is a number, $s$ is a string, $a$ is an array, and $o$ is an object.
We assume that there is a total order on numbers and characters.
Strings and arrays are ordered lexicographically.

Two objects $o_1$ and $o_2$ are ordered as follows:
For both objects $o_i$ ($i in {1, 2}$),
we sort the array $["keys"(o_i)]$ by ascending order
to obtain the ordered array of keys $k_i = [k_1, ..., k_n]$,
from which we obtain $v_i = [o_i [k_1], ..., o_i [k_n]]$.
We then have
$ o_1 < o_2 <==> cases(
  k_1 < k_2 & "if" k_1 < k_2 "or" k_1 > k_2,
  v_1 < v_2 & "otherwise" (k_1 = k_2)
) $
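The following example illustrates the object order:

#example[
  Let $o_1 = {qs("a") |-> 1}$ and $o_2 = {qs("b") |-> 0}$.
  The sorted key arrays are $k_1 = [qs("a")]$ and $k_2 = [qs("b")]$.
  Because $k_1 < k_2$, we have $o_1 < o_2$, regardless of the values contained in the two objects.
]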
https://github.com/GYPpro/Java-coures-report
https://raw.githubusercontent.com/GYPpro/Java-coures-report/main/Report/4.typ
typst
#set text(font:("Times New Roman","Source Han Serif SC")) #show raw.where(block: false): box.with( fill: luma(240), inset: (x: 3pt, y: 0pt), outset: (y: 3pt), radius: 2pt, ) // Display block code in a larger block // with more padding. #show raw.where(block: true): block.with( fill: luma(240), inset: 10pt, radius: 4pt, ) #set math.equation(numbering: "(1)") #set text( font:("Times New Roman","Source Han Serif SC"), style:"normal", weight: "regular", size: 13pt, ) #set page( paper:"a4", number-align: right, margin: (x:2.54cm,y:4cm), header: [ #set text( size: 25pt, font: "KaiTi", ) #align( bottom + center, [ #strong[暨南大学本科实验报告专用纸(附页)] ] ) #line(start: (0pt,-5pt),end:(453pt,-5pt)) ] ) #show raw: set text( font: ("consolas", "Source Han Serif SC") ) = 实现一个trie(字典树)模板 \ #text("*") 实验项目类型:设计性\ #text("*")此表由学生按顺序填写\ #text( font:"KaiTi", size: 15pt )[ 课程名称#underline[#text(" 面向对象程序设计/JAVA语言 ")]成绩评定#underline[#text(" ")]\ 实验项目名称#underline[#text(" 实现一个trie(字典树)模板 ")]指导老师#underline[#text(" 干晓聪 ")]\ 实验项目编号#underline[#text(" 1 ")]实验项目类型#underline[#text(" 设计性 ")]实验地点#underline[#text(" 数学系机房 ")]\ 学生姓名#underline[#text(" 郭彦培 ")]学号#underline[#text(" 2022101149 ")]\ 学院#underline[#text(" 信息科学技术学院 ")]系#underline[#text(" 数学系 ")]专业#underline[#text(" 信息管理与信息系统 ")]\ 实验时间#underline[#text(" 2023年10月27日上午 ")]#text("~")#underline[#text(" 2023年10月27日中午 ")]\ ] #set heading( numbering: "一、" ) #set par( first-line-indent: 1.8em) = 实验目的 \ #h(1.8em)练习字符串相关操作 = 实验环境 \ #h(1.8em)计算机:PC X64 操作系统:Windows 编程语言:Java IDE:Visual Studio Code = 程序原理 \ #h(1.8em)字典树,英文名Trie,顾名思义为类似字典的树状结构。 字典树的每条边代表一个字母,从根节点到树上某个节点的路径则代表了一个字符串。我们用$theta (u,c)$表示节点$u$的$c$字符指向的下一个节点,或者说节点$u$代表的字符串后添加一个字符$c$形成的字符串的节点。 字典树最基础的应用为:查找一个字符串是否在字典中出现过,即$exists$字符串$s$和一个索引$i$,$s.t. forall c < s".size()" ,$有$"tire"(s+c) = s(c)$ 具体实现的过程使用了`String`类、`ArrayList`类、`HashMap`类 = 程序代码 文件`sis3\TrieTree`实现了字典树类 ```java package sis3; import java.util.ArrayList; import java.util.HashMap; public class TrieTree { public ArrayList<HashMap<Character, Integer>> t; public int root = 0; public TrieTree() { t = new ArrayList<HashMap<Character, Integer>>(); // t.add(new HashMap<Character, Integer>()); } public void addedge(String _s) { int pvidx = root; _s += '-'; for (int i = 0; i < _s.length(); i++) { if (t.get(pvidx).containsKey(_s.charAt(i))) { pvidx = t.get(pvidx).get(_s.charAt(i)); } else { t.get(pvidx).put(_s.charAt(i), t.size()); t.add(new HashMap<Character, Integer>()); pvidx = t.get(pvidx).get(_s.charAt(i)); } } } public boolean ifcmp(String s) { int pvidx = root; for (int i = 0; i < s.length(); i++) { if (t.get(pvidx).containsKey(s.charAt(i))) pvidx = t.get(pvidx).get(s.charAt(i)); else return false; } return t.get(pvidx).containsKey('-'); } } ``` 文件`sis\Text.java`实现了输入输出与测试数据处理 ```java package sis3; import java.util.Scanner; // 输入一行一个整数T,表示数据组数T 对于每组数据,格式如下 // 第一行是两个整数,分别表示模式串的个数n和询问的个数q // 接下来n行,每行一个模式串,表示模式串集合 接下来q行,每行一个询问,表示询问集合 public class Test { public static void main(String[] args) { try (Scanner sc = new Scanner(System.in)) { int T = sc.nextInt(); TrieTree t = new TrieTree(); while (T --> 0) { int n = sc.nextInt(); int q = sc.nextInt(); sc.nextLine(); while (n-- > 0) { String s = sc.nextLine(); t.addedge(s); } while (q-- > 0) { String s = sc.nextLine(); if (t.ifcmp(s)) System.out.println("YES"); else System.out.println("NO"); } } } } } ``` = 出现的问题、原因与解决方法 \ #h(1.8em)编码过程十分顺利 = 测试数据与运行结果 #figure( table( align: left + horizon, columns: 3, [*输入*],[*输出*],[*解释*], [`3`],[],[输入测试样例数量], [`3 3 fusufusu fusu anguei`],[` `],[记录测试用字典与测试字符串数量], 
    [`fusu`],[`2`],[two strings in the dictionary have the prefix fusu],
    [`anguei`],[`1`],[one string in the dictionary has the prefix anguei],
    [`kkksc`],[`0`],[no string in the dictionary has the prefix kkksc],
    [`5 2 fusu Fusu AFakeFusu afakefusu fusuisnotfake`],[],[records the second test case's\ dictionary and number of query strings],
    [`Fusu`],[`1`],[one string in the dictionary has the prefix Fusu],
    [`fusu`],[`2`],[two strings in the dictionary have the prefix fusu],
    [`1 1 998244353`],[],[records the third test case's\ dictionary and number of query strings],
    [`9`],[`1`],[one string in the dictionary has the prefix 9],
  )
)
https://github.com/piepert/philodidaktik-hro-phf-ifp
https://raw.githubusercontent.com/piepert/philodidaktik-hro-phf-ifp/main/src/parts/ephid/rahmenplaene/kompetenzen.typ
typst
Other
#import "/src/template.typ": * == #ix("Kompetenzen", "Kompetenz", "Kompetenz in den Rahmenplänen") Der Unterricht in Mecklenburg-Vorpommern ist kompetenzorientiert. Die Ausbildung von Komptenzen steht daher im Vordergrund. Für die Orientierungsstufe und weiterführende Stufe werden andere Kompetenzen angelegt als für die Oberstufe. Sie werden wie folgt bestimmt: #orange-list-with-body[*fachübergreifende #ix("Kompetenzen", "Kompetenz")*#en[Vgl. @MBWKMV2002_RP710[S. 3 ff]] #h(1fr) Stufe 5-10][ Die Kompetenzen der Klassenstufen 5 bis 10 bestehen aus #ix("Sachkompetenz"), #ix("Sozialkompetenz"), #ix("Methodenkompetenz") und #ix("Selbstkompetenz"), die zusammen #ix("Handlungskompetenz") ergeben sollen. #align(center, image("kompetenzei.png", width: 50%)) #set par(justify: false) #grid(columns: 2, column-gutter: 1.5em, row-gutter: 1.5em, [ *#ix("Sachkompetenz")* - Fachwissen erwerben und verfügbar halten - Können ausbilden - Zusammenhänge erkennen - erworbenes Wissen und Können in Handlungszusammenhängen anwenden - Wissen zu sachbezogenen Urteilen heranziehen - Probleme und Problemsituationen erkennen, analysieren und flexibel verschiedene Lösungswege erproben ], [ *#ix("Methodenkompetenz")* - rationell arbeiten - Arbeitsschritte zielgerichtet planen und anwenden - Informationen beschaffen, speichern, in ihrem spezifischen Kontext bewerten und sachgerecht aufbereiten (besonders auch unter Zuhilfenahme der Neuen Medien) - Ergebnisse strukturieren und präsentieren ], [ *#ix("Selbstkompetenz")* - eigene Stärken und Schwächen erkennen und einschätzen - Selbstvertrauen und Selbständigkeit entwickeln - Verantwortung übernehmen und entsprechend handeln - sich Arbeits- und Verhaltensziele setzen - zielstrebig und ausdauernd arbeiten - mit Erfolgen und Misserfolgen umgehen - Hilfe anderer annehmen und anderen leisten ], [ *#ix("Sozialkompetenz")* - mit anderen gemeinsam lernen und arbeiten - eine positive Grundhaltung anderen gegenüber einnehmen - anderen einfühlsam begegnen - sich an vereinbarte Regeln halten - solidarisch und tolerant handeln - mit Konflikten angemessen umgehen ]) ][*fachspezifische #ix("Kompetenzen", "Kompetenz")*#en[Vgl. @MBWKMV2019_RP1112[S. 7. f]] #h(1fr) Stufe 11/12][ Für die gymnasiale Oberstufe werden fachspezifische #ix("Kompetenzen", "Kompetenz") als Grundlage für den Philosophieunterricht festgelegt: #ix("Wahrnehmungs- und Deutungskompetenz", "Wahrnehmungskompetenz", "Deutungskompetenz"), #ix("Argumentations- und Urteilskompetenz"), #ix("Darstellungskompetenz"), #ix("Praktische Kompetenz"). Diese Kompetenzen aggregieren sich zu einer *#ix("Reflexionskompetenz")*. Die philosophischen Reflexionsmethoden sollen gestärkt werden. #grid(columns: 2, column-gutter: 1.5em, row-gutter: 1.5em, [ *#ix("Wahrnehmungs- und Deutungskompetenz", "Wahrnehmungskompetenz", "Deutungskompetenz")* ... beschreibt die Fähigkeit, philosophische Folgen zu erkennen, beschreiben, selbständig formulieren und mit philosophsichen Fragen und Erkenntnissen in Beziehung setzen zu können. Philosophische Aussagen, Probleme und Fragen können erfasst und dargestellt werden. ], [ *#ix("Argumentations- und Urteilskompetenz")* ... meint, Begriffe und philosophische Argumente sowie ihre Konsequenzen erschließen, wiedergeben, vergleichen, kritisch zu prüfen und bewerten zu können. Die SuS können selbständige zu begründeten Urteilen kommen. ], [ *#ix("Darstellungskompetenz")* ... setzt sich mit der adäquaten, selbständigen, adressatenbezogen und vielfältigen Darstellung philosophischer Gedanken auseinander. 
], [ *#ix("Praktische Kompetenz")* ... hat das Ziel, sich im eigenen Denken zurechtzufinden, dieses selbständig zu machen und Verantwortung zu übernehmen. Unterschiedliche weltanschauliche, moralische und ethische Differenzen werden akzeptiert und geachtet. ]) ]
https://github.com/JamesWrigley/euxfel-polylux-theme
https://raw.githubusercontent.com/JamesWrigley/euxfel-polylux-theme/master/theme.typ
typst
#import "@preview/polylux:0.3.1": * #let xDarkBlue = rgb(11, 21, 70) #let xOrange = rgb(243, 146, 0) #let xLightBlue = rgb(85, 157, 187) #let euxfel-theme(aspect-ratio: "16-9", title: "Title", author: "Author", title-page-header: true, font: "New Computer Modern Sans", body) = { let item-rect(fill-color) = [#rect(width: 30pt, height: 15pt, fill: fill-color)] set list(marker: ([#item-rect(xOrange)], [#item-rect(xLightBlue)], [#image("subsubitem.svg")])) set text(size: 20pt, font: font) set page( paper: "presentation-" + aspect-ratio, margin: (top: 70pt), header: context { if title-page-header or counter(page).get().first() > 1 { set text(size: 12pt) grid(columns: (1fr, 1fr), column-gutter: 50pt, {line(length: 100%); title}, {stack(dir: ltr, align(left)[#line(length: 100%); #author], align(right)[#counter(page).display()] )} ) } }, footer: [ #set text(size: 15pt) #stack(dir: ltr, spacing: 5pt, rect(width: 80pt, height: 10pt, fill: xDarkBlue), rect(width: 30pt, height: 10pt, fill: xDarkBlue), rect(width: 15pt, height: 10pt, fill: xOrange), [*European XFEL*] ) ] ) polylux-slide[ #align(horizon)[ #grid( columns: (3fr, 1fr), column-gutter: 50pt, align(left)[ #set text(size: 22pt) = #title #author ], align(right)[ #image("logo.svg") ] ) ] ] body } #let slide(title, body) = { polylux-slide[ == #title #v(15pt) #body ] }
https://github.com/PhilChodrow/cv
https://raw.githubusercontent.com/PhilChodrow/cv/main/src/content/awards.typ
typst
#import "../template.typ": * #cvSection("Honors and Awards") #cvEntry( title: [Liggett Instructor Distinguished Faculty Teaching Award], organisation: [Department of Mathematics, UCLA], logo: "", date: [2022], location: [Los Angeles, CA], description: emph("\"Recognizing excellence in contribution to the instructional mission of the Department.\"") // tags: ("Database Systems", "Computer Networks", "Cybersecurity") ) #cvEntry( title: [Ivy Award], organisation: [Swarthmore College], logo: "", date: [2012], location: [Swarthmore, PA], description: emph("\"Recognizing the man of the graduating class who is outstanding in leadership, scholarship, and contributions to the Swarthmore College community.\"") ) #cvSubSection("Travel and Conference Awards") #pad(left: 0.5cm, table( columns: 2, stroke: none, align: (right, left), [2022], [SIAM Travel Award, _SIAM Conference on Mathematics of Data Science_], [2021], [SIAM Travel Award, _SIAM Conference on Applications of Dynamical Systems_], [2020], [Best Poster Award, _Northeastern Regional Conference on Complex Systems_] ) )
https://github.com/SundaeSwap-finance/sundae-specs
https://raw.githubusercontent.com/SundaeSwap-finance/sundae-specs/main/permissioned-pool/spec.typ
typst
#import "@preview/ssrn-scribe:0.5.0": * #import "@preview/note-me:0.2.1": note #import "@preview/chronos:0.1.0" #import "@preview/glossarium:0.4.1": * #show link: set text(blue) #show link: underline; #show: make-glossary #show: paper.with( font: "PT Serif", maketitle: true, title: [Sundae Protocol - Permissioned DeFi], subtitle: [ADA Handle DID Resolution], authors: ( ( name: "<NAME>", affiliation: "Sundae Labs", email: "<EMAIL>", note: "" ), ( name: "<NAME>", affiliation: "Kora Labs", email: "<EMAIL>", note: "", ), ( name: "<NAME>", affiliation: "Netki", email: "<EMAIL>", note: "", ), ), date: "October 2024", abstract: [ DeFi protocols and cryptocurrency ecosystems often struggles to attract liquidity, in large part because of the lack of regulatory clarity. Large institutions who wish to participate hesitate to do so because DeFi primitives are usually wholly permissionless. This puts them at regulatory risk, as their funds may be utilized in trades that fund illegal activities. Sundae Labs, Kora Labs, and Netki are collaborating to solve this problem. This specification outlines 3 separate standards that will enable seamless permissioned DeFi on top of the by-default permissionless Sundae Protocol on the Cardano blockchain. + ADA Handle DID Resolution - allow a user to associate #glspl("did") with their Cardano address + Permissioned Pools - allow liquidity pools that have extra configurable conditions attached to specific orders + Netki Integration - specify exactly how to utilize the above, along with Netki's compliance oracle infrastructure Together, these three proposals allow the creation of "clean" DeFi pools on top of the Sundae Protocol. ] ) = Introduction The goal of this specification is to outline how Sundae Labs, Kora Labs, and Netki are collaborating to allow permissioned and regulatory compliant “Clean” pools on Cardano, leveraging ADA Handles for easy @did Discovery, the SundaeSwap protocol for DeFi primitives, and Netki as a compliance oracle. The main thesis of this work is that DeFi is unnecessarily closed off from institutional participation because of regulatory risk. A large entity may have very deep liquidity that they would be interested in deploying to DeFi, but are unable to do so because they cannot bear the risk of those funds being used for money laundering, terrorism financing, or any other unsavory activities. In a classical finance setting, these actors would have the assurance that the financial institutions holding the funds and executing the trades have done their due diligence such as performing @kyc on each customer executing a trade. We strongly believe that permissionless DeFi provides options to legitimate actors in the developing world, and have spent 3 years building exactly those primitives. However, we also believe that the landscape of DeFi can provide for all users, including those that want more assurance behind who they are transacting with. Decentralized Identity standards allow entities to exchange sensitive identity information in a decentralized setting, without unduly exposing those details to the broader watching world. #note[ Originally, we had planned to include a proposal to extend #link("https://cips.cardano.org/cip/CIP-30", "CIP-30") to allow a dApp to communicate with a DID wallet. 
However, we discovered that this work is already under way via several great standards (such as #link("https://github.com/cardano-foundation/cf-identity-wallet", "this") work by the Cardano Foundation), is auxiliary to the objective of the project, and didn't make sense to duplicate that work. ] = ADA Handle DIDs ADA Handles are a “human readable address” product built on Cardano. By holding a Cardano Native Token with a given name at a specific address, tooling such as wallets can allow users to type in a human readable name, and unambiguously resolve that to an address. ADA Handles follow the CIP-68 standard to allow custom data to be associated with the ADA handle. For example, this capability is used today to specify a preferred background, profile picture, and highlight color to personalize your Handle, and dApps can match their theming to that personalization information. All CIP-68 ADA Handles begin with an asset name prefix of `000de140`. There also exists a corresponding token with a prefix of `000643b0`, and the same suffix, that corresponds to the “reference token”. The datum holding the reference token can be updated with a signature from the wallet holding the ADA handle to prove ownership, and a signature from Kora Labs to ensure that the structure of the datum stays well formed. The format of that datum according to CIP-68 is: ```cddl big_int = int / big_uint / big_nint big_uint = #6.2(bounded_bytes) big_nint = #6.3(bounded_bytes) metadata = { * metadata => metadata } / [ * metadata ] / big_int / bounded_bytes version = int extra = plutus_data datum = #6.121([metadata, version, extra]) ``` The purpose of `metadata` is to capture metadata about the NFT itself, while `extra` is arbitrary and can be determined by use case. ADA Handle has utilized the `extra` field for their personalization metadata. We propose standardizing on a `public_did` field added to this extra map in the case of ADA Handles. The public_did field will be a @cbor map, where the keys represent a human readable label, and the values represent a W3C DID Identifier, according to #link("https://www.w3.org/TR/did-core/#did-syntax", "this specification"). ```cddl did = bounded_bytes, ; UTF-8 did_map = { * label => did } metadata = { ; ... public_did: did_map } ``` One of these keys can be “default”, which should indicate the DID to select in non-interactive scenarios where the user cannot be prompted. If a dApp has some ADA Handle, and wishes to resolve a users DID, it can follow these steps: - Strip off the `000de140` prefix from the ADA Handle - Prepend the `000643b0` prefix to obtain the reference token name - Look up the UTxO holding the token with the same policy ID and the reference token name - Read the attached Datum, and deserialize it according to the CIP-68 specification - Read the `extra.public_did` field - Select one of the DIDs - If noninteractive, and one of the keys is `default`, use this one - If noninteractive, and there is only one DID, use this one - If noninteractive, and there are multiple DIDs, behavior is dependent on your use case - If interactive, and there is only one DID, use this one - If interactive, and there are multiple DIDs, prompt the user for which one to use, using the keys as labels From there, the dApp can use existing standards, such as #link("https://didcomm.org/", "DIDComm"), #link("https://identity.foundation/keri/did_methods/", "KERI"), or others to interact with the user and their identity. 
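As a non-normative illustration, the first two steps and the DID selection rule can be sketched directly in Typst. The helper names and the example asset name below are hypothetical and not part of any standard:

```typst
// Hypothetical sketch of the resolution steps above.
#let user-prefix = "000de140"
#let ref-prefix = "000643b0"

// Derive the CIP-68 reference token name from a handle's asset name.
#let reference-token-name(asset-name) = {
  assert(asset-name.starts-with(user-prefix), message: "not a CIP-68 user token")
  ref-prefix + asset-name.slice(user-prefix.len())
}

// Non-interactive selection over the `extra.public_did` map.
#let select-did(did-map) = {
  if "default" in did-map {
    did-map.at("default")
  } else if did-map.len() == 1 {
    did-map.values().first()
  } else {
    none // multiple DIDs: behavior is use-case dependent
  }
}

// e.g. resolving the hypothetical asset name "000de140" + "68656c6c6f":
// reference-token-name("000de14068656c6c6f") == "000643b068656c6c6f"
```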
= Sundae Protocol Permissioned Pools

SundaeSwap v3 is a fast, decentralized AMM-based exchange built by Sundae Labs. We propose making the following updates to the Sundae v3 Protocol:

- Define a new pool type, `v3-permissioned`
- This new pool has a new `condition` property, which is of type `ScriptHash`
- If the pool UTxO is spent with the `PoolScoop` redeemer, the `condition` must be present in the `withdrawals` of the transaction, effectively "blessing" the scoop.
  - To avoid locking user funds in the pool, a scoop that consists of only withdrawals is exempt from this condition.
- A list of `allowed_conditions` is added to the `settings` datum, utilizing the `extensions` field
- Minting a new `v3-permissioned` pool requires that the `condition` be one of the conditions in the `allowed_conditions` in the `settings` datum.
- A new redeemer is added to `ManageRedeemer` to allow management of the condition.
  - If the pool UTxO is spent with the `Manage` redeemer, the `PoolManage` script must be present in the withdrawals of the transaction. (This is already the case.)
  - If the `PoolManage` script is invoked with the `ManageCondition` redeemer, the current condition must be present in the withdrawals, and the new `condition` must be present in the `allowed_conditions` field in the settings datum.
- Validation of the following conditions is moved to a default condition script:
  - Checking that the transaction is signed by a scooper
  - Checking that the correct protocol fee is paid
  - Checking that the correct pool fee is paid

The above allows arbitrary additional logic to be layered onto the pool. Some examples of how this might be used:

- A pool that only allows trading from 9 to 5 to synchronize with an existing market
- A pool that only allows deposits or swaps if they bear a signed authorization token
- A pool that allows swaps to pay a lower protocol or pool fee if they hold a threshold of a token or a membership NFT

= Netki Compliant Pools

@netki is a company that provides compliance and identity verification services for Web3 contexts. We have been in discussions with them on a way to bring “clean” pools to SundaeSwap v3.

Based on those discussions, we propose the following scheme that builds on the standards in the previous sections:

- Each pool will include, in the `extensions` field, a list of approved compliance oracles
  - This can be set when the pool is created
- Each order will include, in the `extra` field, a signed compliance token
- The compliance token will consist of:
  - The user's @did Identifier
  - The user's public key
  - The destination address
  - Blake2b-256 hash of the CBOR-serialized details from the order
  - A valid range
  - The public key of the oracle
  - A signature from the compliance oracle for fields 1-6
- The condition will validate that each order:
  - Has an attached compliance token
  - The `owner` of an order is the same as the public key in the token
  - The `destination` of an order is the same as the one in the token
  - The order details hash to the same hash from the token
  - The valid range of the transaction entirely contains the valid range from the token
  - The oracle public key is one of those listed in the approved oracles
  - The signature is valid

When a user wishes to perform a swap, the dApp then:

- Notifies @netki that a swap has been started for a specific @did
- Asks the identity wallet to show their @kyc credential to @netki
  - This may also redirect to a flow asking the user to perform @kyc with Netki to issue said credential
- Netki checks the configured compliance rules against that @did
- Netki notifies the dApp with an authorization token that can be included in the transaction
- The dApp builds the transaction, asks for a signature, and submits it to the blockchain

= Conclusion

End to end, here is a sequence diagram that illustrates how the above three protocols enable a decentralized, compliant liquidity pool.

#scale(x: 70%, y: 70%, origin: top + center)[
  #chronos.diagram({
    import chronos: *
    _par("B", display-name: "Browser")
    _par("W", display-name: "Wallet")
    _par("I", display-name: "Identity Wallet")
    _par("O", display-name: "Compliance Oracle")
    _par("C", display-name: "Cardano Blockchain")
    _par("P", display-name: "Pool Script")

    _seq("B", "W", comment: [`List Assets`])
    _seq("W", "B") // Response
    _seq("B", "C", comment: [`Lookup Datum (handle)`])
    _seq("C", "B") // Response
    _seq("B", "O", comment: [`Initiate Swap (details, did)`])
    _seq("B", "I", comment: [`Request Swap (details, netki)`])
    _seq("I", "O", comment: [`Approve(signature, kyc)`])
    _seq("O", "B", comment: [`Approve(token)`])
    _seq("B", "B", comment: [`Build Tx`])
    _seq("B", "W", comment: [`Sign Tx`])
    _seq("W", "B")
    _seq("B", "C", comment: [`Submit Tx`])
    _seq("C", "P", comment: [`Validate Token`])
    _seq("P", "C")
    _seq("C", "B", comment: [`Confirm Tx`])
  })
]

= Glossary

#print-glossary(
  (
    (
      key: "cbor",
      short: "CBOR",
      long: "Concise Binary Object Representation",
      desc: "A binary encoding format used heavily by the Cardano blockchain",
    ),
    (
      key: "did",
      short: "DID",
      plural: "DIDs",
      long: "Decentralized Identifier",
      longplural: "Decentralized Identifiers",
      desc: "A standard for creating unique identifiers for entities in a decentralized setting",
    ),
    (
      key: "b2hash",
      short: "Blake2b 256 Hash",
      desc: "A mathematical fingerprint of a set of data; a specific algorithm used heavily by the Cardano blockchain",
    ),
    (
      key: "kyc",
      short: "KYC",
      long: "Know Your Customer",
      desc: "A series of steps and data collection policies that one entity might employ so it can transact with another and verify the identity of a customer",
    ),
    (
      key: "netki",
      short: "Netki",
      desc: "A compliance oracle solution for web3 that can check the compliance of a transaction against a flexible and configurable ruleset."
    )
  ),
)
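As a closing, non-normative illustration, the per-order checks from the Netki section can be expressed as a single predicate. The sketch below is written in Typst purely for readability; the record shapes and all field names are hypothetical, and a real implementation would live in the on-chain condition script:

```typst
// Hypothetical, non-normative sketch of the per-order validation rules.
#let range-contains(outer, inner) = {
  // The transaction's validity interval must entirely contain the token's.
  outer.start <= inner.start and inner.end <= outer.end
}

#let order-passes(order, token, tx, approved-oracles) = (
  // The oracle signature check over fields 1-6 of the token is elided here.
  order.owner == token.public-key
    and order.destination == token.destination
    and order.details-hash == token.details-hash
    and range-contains(tx.valid-range, token.valid-range)
    and token.oracle-key in approved-oracles
)
```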
https://github.com/metamuffin/typst
https://raw.githubusercontent.com/metamuffin/typst/main/tests/typ/compute/data.typ
typst
Apache License 2.0
// Test reading structured data and files. // Ref: false --- // Test reading plain text files #let data = read("/hello.txt") #test(data, "Hello, world!") --- // Error: 18-32 file not found (searched at /missing.txt) #let data = read("/missing.txt") --- // Error: 18-28 file is not valid utf-8 #let data = read("/bad.txt") --- // Test reading CSV data. // Ref: true #set page(width: auto) #let data = csv("/zoo.csv") #let cells = data.at(0).map(strong) + data.slice(1).flatten() #table(columns: data.at(0).len(), ..cells) --- // Error: 6-16 file not found (searched at typ/compute/nope.csv) #csv("nope.csv") --- // Error: 6-16 failed to parse csv file: found 3 instead of 2 fields in line 3 #csv("/bad.csv") --- // Test reading JSON data. #let data = json("/zoo.json") #test(data.len(), 3) #test(data.at(0).name, "Debby") #test(data.at(2).weight, 150) --- // Error: 7-18 failed to parse json file: syntax error in line 3 #json("/bad.json") --- // Test reading TOML data. #let data = toml("/toml-types.toml") #test(data.string, "wonderful") #test(data.integer, 42) #test(data.float, 3.14) #test(data.boolean, true) #test(data.array, (1, "string", 3.0, false)) #test(data.inline_table, ("first": "amazing", "second": "greater") ) #test(data.table.element, 5) #test(data.table.others, (false, "indeed", 7)) #test(data.date_time, datetime( year: 2023, month: 2, day: 1, hour: 15, minute: 38, second: 57, )) --- // Error: 7-18 failed to parse toml file: expected `.`, `=`, index 15-15 #toml("/bad.toml") --- // Test reading YAML data #let data = yaml("/yaml-types.yaml") #test(data.len(), 7) #test(data.null_key, (none, none)) #test(data.string, "text") #test(data.integer, 5) #test(data.float, 1.12) #test(data.mapping, ("1": "one", "2": "two")) #test(data.seq, (1,2,3,4)) #test(data.bool, false) #test(data.keys().contains("true"), false) --- --- // Error: 7-18 failed to parse yaml file: while parsing a flow sequence, expected ',' or ']' at line 2 column 1 #yaml("/bad.yaml") --- // Test reading XML data. #let data = xml("/data.xml") #test(data, (( tag: "data", attrs: (:), children: ( "\n ", (tag: "hello", attrs: (name: "hi"), children: ("1",)), "\n ", ( tag: "data", attrs: (:), children: ( "\n ", (tag: "hello", attrs: (:), children: ("World",)), "\n ", (tag: "hello", attrs: (:), children: ("World",)), "\n ", ), ), "\n", ), ),)) --- // Error: 6-16 failed to parse xml file: found closing tag 'data' instead of 'hello' in line 3 #xml("/bad.xml")
https://github.com/LDemetrios/Typst4k
https://raw.githubusercontent.com/LDemetrios/Typst4k/master/src/test/resources/suite/foundations/decimal.typ
typst
--- decimal-constructor --- #test(decimal(10), decimal("10.0")) #test(decimal("-7654.321"), decimal("-7654.321")) #test(decimal("\u{2212}7654.321"), decimal("-7654.321")) #test(decimal({ 3.141592653 }), decimal("3.141592653000000012752934707")) #test(decimal({ -3.141592653 }), decimal("-3.141592653000000012752934707")) #test(type(decimal(10)), decimal) --- decimal-constructor-bad-type --- // Error: 10-17 expected integer, float, or string, found type #decimal(decimal) --- decimal-constructor-bad-value --- // Error: 10-17 invalid decimal: 1.2.3 #decimal("1.2.3") --- decimal-constructor-float-literal --- // Warning: 18-25 creating a decimal using imprecise float literal // Hint: 18-25 use a string in the decimal constructor to avoid loss of precision: `decimal("1.32523")` #let _ = decimal(1.32523) --- decimal-constructor-float-inf --- // Error: 10-19 float is not a valid decimal: float.inf #decimal(float.inf) --- decimal-constructor-float-negative-inf --- // Error: 10-20 float is not a valid decimal: -float.inf #decimal(-float.inf) --- decimal-constructor-float-nan --- // Error: 10-19 float is not a valid decimal: float.nan #decimal(float.nan) --- decimal-repr --- // Test the `repr` function with decimals. #test(repr(decimal("12.0")), "decimal(\"12.0\")") #test(repr(decimal("3.14")), "decimal(\"3.14\")") #test(repr(decimal("1234567890.0")), "decimal(\"1234567890.0\")") #test(repr(decimal("0123456789.0")), "decimal(\"123456789.0\")") #test(repr(decimal("0.0")), "decimal(\"0.0\")") #test(repr(decimal("-0.0")), "decimal(\"0.0\")") #test(repr(decimal("-1.0")), "decimal(\"-1.0\")") #test(repr(decimal("-9876543210.0")), "decimal(\"-9876543210.0\")") #test(repr(decimal("-0987654321.0")), "decimal(\"-987654321.0\")") #test(repr(decimal("-3.14")), "decimal(\"-3.14\")") #test(repr(decimal("-3.9191919191919191919191919195")), "decimal(\"-3.9191919191919191919191919195\")") #test(repr(decimal("5.0000000000")), "decimal(\"5.0000000000\")") #test(repr(decimal("4.0") - decimal("8.0")), "decimal(\"-4.0\")") --- decimal-display --- // Test decimals. #set page(width: auto) #decimal("12.0") \ #decimal("3.14") \ #decimal("1234567890.0") \ #decimal("0123456789.0") \ #decimal("0.0") \ #decimal("-0.0") \ #decimal("-1.0") \ #decimal("-9876543210.0") \ #decimal("-0987654321.0") \ #decimal("-3.14") \ #decimal("-3.9191919191919191919191919195") \ #decimal("5.0000000000") \ #(decimal("4.0") - decimal("8.0")) --- decimal-display-round --- // Display less digits. #calc.round(decimal("-3.9191919191919191919191919195"), digits: 4) \ #calc.round(decimal("5.0000000000"), digits: 4) --- decimal-expected-float-error --- // Error: 11-25 expected integer, float, or angle, found decimal // Hint: 11-25 if loss of precision is acceptable, explicitly cast the decimal to a float with `float(value)` #calc.sin(decimal("1.1")) --- decimal-expected-integer-error --- // Error: 11-25 expected integer, found decimal #calc.odd(decimal("1.1"))
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/visualize/image-06.typ
typst
Other
// Error: 8-29 file not found (searched at typ/visualize/path/does/not/exist) #image("path/does/not/exist")
https://github.com/Sckathach/ieee-typst-template
https://raw.githubusercontent.com/Sckathach/ieee-typst-template/main/main.typ
typst
MIT License
#import "template.typ": * #show: ieee_conference.with( title: "IEEE with Typst", header: "JOURNAL OF TYPST CLASS FILES, VOL. 18, NO. 9, SEPTEMBER 2020", footer: "979-8-3503-2934-6/23/\$31.00 ©2023 IEEE", abstract: [ This document describes the most common article elements and how to use the TYPST to produce files that are suitable for submission to the Institute of Electrical and Electronics Engineers (IEEE). ], authors: ( ( name: "<NAME>", department: [President], organization: [Bob Labs], location: [Paris, France], email: "<EMAIL>" ), ( name: "<NAME>", department: [Vice president], organization: [Bob Labs], location: [Paris, France], email: "<EMAIL>" ), ( name: "<NAME>", department: [Teck], organization: [Bob Labs], location: [Paris, France], email: "<EMAIL>" ), ), index-terms: ("Class", "IEEE", "TYPST", "paper", "style", "template", "typsetting"), bibliography-file: "refs.bib", ) #set par(first-line-indent: 0pt) = Introduction #dropcap("WELCOME to the Typst IEEE template. This template aims to provide the basis to create IEEE papers. Documentation and tutorials are available on the official Typst website: https://typst.app.") = Why Typst ? #set list(marker: []) - Typst is fast #emoji.rocket - Typst is open sourced #emoji.book.open - Typst is easy #emoji.checkmark.box More seriously: #set list(marker: [-]) - Previews your changes instantly. - Provides clear, understandable error messages. - Has a consistent styling system for configuring everything from fonts and margins to the look of headings and lists. - Uses familiar programming constructs instead of hard-to-understand macros. = How to use Typst ? A web playground is disponible on the official website, but it is also possible to use your favourite text editor thanks to the Typst lsp. Plugins exist for VSCode: https://github.com/Enter-tainer/typst-preview or even Vim: https://github.com/kaarmu/typst.vim. = Where to get the Typst Templates As always, the *Awesome GitHub Pages* are a good start. The Typst awesome page (https://github.com/qjcg/awesome-typst) already refers to articles about tools, integrations, templates, tutorials and much more! = Project Structure A Typst project possess a main script, usually `main.typ`, ressources that can be managed inside folders, templates and a bibliography file. The structure may look as follows: ` ressources/ ├─ png/ │ ├─ img1.png main.typ template.typ refs.bib ` #v(1em) = Using the Template The template is basically a function with arguments that has to be called at the start of the `main.typ` script. == Paper Title The title of your paper is coded as: ` title: "IEEE with Typst", ` #v(1em) == Author Names and Affiliations The author section should be coded as follows: ` authors: ( ( name: "<NAME>", department: [President], organization: [Bob Labs], location: [Paris, France], email: "<EMAIL>" ), ( name: "<NAME>", department: [Vice president], organization: [Bob Labs], location: [Paris, France], email: "<EMAIL>" ), ... ) ` #v(1em) == Header and Footer The header is present on each page whereas the footer is only printed in the first page. ` header: "JOURNAL OF TYPST CLASS FILES, VOL. 18, NO. 
9, SEPTEMBER 2020", footer: "979-8-3503-2934-6/23/\$31.00 ©2023 IEEE", ` #v(1em) == Index terms and bibliography file ` index-terms: ("Class", "IEEE", "TYPST", "paper", "style", "template", "typsetting"), bibliography-file: "refs.bib", ` #v(1em) It might even be possible to add *keywords*: ` keywords: [bananas, apples, oranges], ` #v(1em) == Abstracts The coding is simply: ` abstract: [ This document describes the most common article elements and how to use the TYPST to produce files that are suitable for submission to the Institute of Electrical and Electronics Engineers (IEEE). ], ` #v(1em) == Initial Drop Cap Letter The first text paragraph uses a "drop cap" followed by the first word in ALL CAPS. This template uses the module `droplet`. It is imported at the beginning of the `template.typ` script with `#import "@preview/droplet:0.2.0": dropcap` and used as follows in the introduction section of `main.typ`: ` #dropcap("WELCOME to the Typst IEEE template. This template aims to provide the basis to create IEEE papers. Documentation and tutorials are available on the official Typst website: https://typst.app.") ` #v(1em) Height and gaps can be modified in the template file or directly in the main file by slightly adding arguments to the function. == Modify everything at will An important thing to take into account is that the whole code of the template is written in the `template.typ` file, so changing things is as simple as modifying a line in this file. = Body == Sections and Subsections Section headings are not as complicated as their LaTeX's counterparts. It is simply created with "=": ` = Section Head The text of your paragraph . . . == Subsection === Subsubsection ... ` #v(1em) == Citations to the Bibliography Coding for bibliography is included in the standard librairie. The `refs.bib` can be configured as follows: ` @article{example, title={Example}, author={Example, Example and Example, Example}, journal={Example Example Example Example}, volume={1}, pages={1--2}, year=2020, publisher={Example Example Inc.} } ` #v(1em) Then for a single citation code as follows: ` see @bob ` #v(1em) This will display as: see @bob For multiple citations code as follows: ` @bob1 @bob2 @bob3 ` #v(1em) This will display as @bob @bob2 @bob3 == Figures Figures are coded with the standard Typst command as follows: ` #figure( image("ressources/png/fig1.png"), caption: [This is the caption for one fig.] )<fig1> ` #v(1em) To cross-reference your figures in the text use the following code example: ` See figure @fig1 ... ` #v(1em) This will produce: See figure @fig1 . . . #figure( image("ressources/png/fig1.png"), caption: [This is the caption for one fig.] )<fig1> == Tables Tables should be coded with the standard Typst coding. The following example shows a simple table. ` #figure( table( columns: (auto, auto, auto), inset: 3pt, align: horizon, [Order of filter], [Arbitrary coefficients $e_m$], [coefficients $b_(i,j)$], [1],[$b_(i j)=hat(e). hat(beta_(i,j))$],[$b_(0 0)=0$], [2],[$beta_(2 2) = (1, -1, -1, 1, 1, 1)$],[], [3],[$b_(i j)=hat(e). hat(beta_(i j))$],[$b_(0 0)=0$], ), caption: [A simple table example], )<tab1> ` #v(1em) #figure( table( columns: (auto, auto, auto), inset: 3pt, align: horizon, [Order of filter], [Arbitrary coefficients $e_m$], [coefficients $b_(i,j)$], [1],[$b_(i j)=hat(e). hat(beta_(i,j))$],[$b_(0 0)=0$], [2],[$beta_(2 2) = (1, -1, -1, 1, 1, 1)$],[], [3],[$b_(i j)=hat(e). 
hat(beta_(i j))$],[$b_(0 0)=0$], ), caption: [A simple table example], )<tab1> #v(1em) == Lists In this section, we will consider three types of lists: simple unnumbered, numbered and bulleted. The basic list is implemented with dashes "-", but it can be changed to anything with the command: `#set list(marker: [->])`. #v(1em) *A simple list* - bananas - apples - camels coded as: ` - bananas - apples - camels ` \ *A simple numbered list* + bananas + apples + camels coded as: ` + bananas + apples + camels ` \ *A plain list* #set list(marker:[]) - bananas - apples - camels coded as: ` #set list(marker:[]) - bananas - apples - camels ` \ = Mathematical Typstography Simply _beautiful_... $ sum_(n=12 \ n!=i)^N lim_(u v w&=12 \ n&!=i j k) lr(angle.l a abs(e^(integral g_(\[i)d x))f_(j\]) angle.r) $ is coded as: ` $ sum_(n=12 \ n!=i)^N lim_(u v w&=12 \ n&!=i j k) lr(angle.l a abs(e^(integral g_(\[i)d x))f_(j\]) angle.r) $ ` \ and it supports functions... Create your function: `#let ket(X) = $lr(| #X angle.r)$`, and then use it! #let ket(X) = $lr(| #X angle.r)$ #let bra(X) = $lr(angle.l #X |)$ ` $ H ket(0) = 1/sqrt(2) ket(+) + 1/sqrt(2) ket(-) $ ` \ $ H ket(0) = 1/sqrt(2) ket(+) + 1/sqrt(2) ket(-) $ == Display Equations There are two ways of using expression, either inline: $forall x in Gamma$ or as a display style: $ Pi = sum_(k=1)^p ket(epsilon_k) bra(epsilon_k) "projects to" E = "vect"(epsilon_1, dots, epsilon_p) $<super_projection> is coded as follows: ` $ Pi = sum_(k=1)^p ket(epsilon_k) bra(epsilon_k) "projects to" E = "vect"(epsilon_1, dots, epsilon_p) $<super_projection> ` #v(1em) The difference lies in the spaces. If *both* "\$" are followed with at least one space, it will be displayed, otherwise it will stay inline. To reference this equation in the text use `@super_projection` Please see (@super_projection) is coded as follows: ` Please see (@super_projection) ` \ == Equation Numbering *Consecutive Numbering:* Equations within an article are numbered consecutively from the beginning of the article to the end, i.e., (1), (2), (3), (4), (5), etc. #v(1em) *Custom numbering:* This is possible with the `numbering` function. == Multi-line equations and alignment $ a &= c+d $ $ b &= e+f $ is coded as: ` $ a &= c+d $ $ b &= e+f $ ` \ == Example of a custom numbering #set math.equation(numbering: "(7a)") #counter(math.equation).update(0) $ f&=g $ $ f' &=g' $ $ cal(L)f &= cal(L)g $ is coded as: ` #set math.equation(numbering: "(7a)") #counter(math.equation).update(0) $ f&=g $ $ f' &=g' $ $ cal(L)f &= cal(L)g $ ` \ == Matrices Matrices are created with the `mat` function. 
It can be used with multiple arguments, such as follows: #set math.equation(numbering: "(1)") #counter(math.equation).update(5) #v(1em) *A simple matrix* $ mat( 0, 1 ; 1, 0 ) $ is coded as: ` mat( 0, 1 ; 1, 0 ) ` \ *A matrix with square brackets* $ mat( delim: "[", 0 , -1 ; 1 , 0 ) $ is coded as: ` mat( delim: "[", 0 , -1 ; 1 , 0 ) ` #v(1em) *A matrix with curly braces* $ mat( delim: "{", 1, 0 ; 0, -1 ) $ is coded as: ` mat( delim: "{", 1, 0 ; 0, -1 ) ` \ *A matrix with single verticals* $ mat( delim: "|", a, b ; c, d ) $ is coded as: ` mat( delim: "|", a, b ; c, d ) ` \ *A matrix with double verticals* $ mat( delim: "||", i, 0 ; 0, -i ) $ is coded as: ` mat( delim: "||", i, 0 ; 0, -i ) ` \ *A matrix with custom alignment* The last column is aligned differently: $ mat( a + b + c, u v, x - y, 27; a + b, u + v, z, 134& ) $ is coded as: ` mat( a + b + c, u v, x - y, 27; a + b, u + v, z, 134& ) ` \ *A matrix with vertical and horizontal rules* $ mat( augment: #( hline: 1, vline: (1, 2), ), a + b + c, u v, x - y, 27 ; a + b, u + v, z, 134 ) $ is coded as: ` mat( augment: #( hline: 1, vline: (1, 2), ), a + b + c, u v, x - y, 27 ; a + b, u + v, z, 134 ) ` \ == Cases Structures Cases are created using the `case` function. It is as simple as the creating a matrix: #set math.equation(numbering: none) $ z_m (t) = cases( 1\, space "if" beta_m (t), 0\, space "otherwise." ) $ is coded as follows: ` z_m (t) = cases( 1\, space "if" beta_m (t), 0\, space "otherwise." ) ` #v(1em) Make sure to escape characters such as "," when calling functions as "," are used to separate arguments. It may be possible to add "`space`" to ajust the spacing. #v(1em) == Attachs Attachs are made easy with the widly used "^" and "\_", but Typst add a simple command to reproduce the limits behaviour: $ d^(K M)_R = limits(arg min)_(d^(K M)_l){d^(K M)_1, dots, d^(K M)_6} $ is coded as follows: ` d^(K M)_R = limits(arg min)_(d^(K M)_l){d^(K M)_1, dots, d^(K M)_6} ` \ == Text Acronyms inside equations Simple text can be added everywhere inside equations with double quotes: $ "MSE" = 1/n sum_(i=1)^n (Y_i - hat(Y_i))^2 $ is given by: ` "MSE" = 1/n sum_(i=1)^n (Y_i - hat(Y_i))^2 ` #v(1em) = A Final Checklist + Typst is cool. + Typst is fast. + Typst is Rust. + Make sure to reads the clear, comprehensible and nearly complete documentation at https://typst.app/docs/. + Enjoy typing!
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/math/op-03.typ
typst
Other
// Test custom operator. $ op("myop", limits: #false)_(x:=1) x \ op("myop", limits: #true)_(x:=1) x $
https://github.com/tedaco1/typst-example
https://raw.githubusercontent.com/tedaco1/typst-example/main/document-files/science-template.typ
typst
MIT License
#let template( title: none, authors: (), abstract: [], doc, ) = { set align(center) text(17pt, title) let count = authors.len() let ncols = calc.min(count, 3) grid( columns: (1fr,) * ncols, row-gutter: 24pt, ..authors.map(author => [ #author.name \ #author.affiliation \ #link("mailto:" + author.email) ]), ) par(justify: false)[ *Abstract* #abstract ] set align(left) columns(2, doc) }
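// Usage sketch (hypothetical, not part of the original template): in a
// sibling `main.typ`, the template above could be applied with a show
// rule, for example:
//
//   #import "science-template.typ": template
//   #show: template.with(
//     title: [A Hypothetical Paper],
//     authors: (
//       (name: "Ada Example", affiliation: "Example University", email: "ada@example.org"),
//     ),
//     abstract: [A one-sentence abstract.],
//   )
//
//   = Introduction
//   #lorem(40)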
https://github.com/lucannez64/Notes
https://raw.githubusercontent.com/lucannez64/Notes/master/Extrema%20Multivariable%20function.typ
typst
#import "template.typ": * // Take a look at the file `template.typ` in the file panel // to customize this template and discover how it works. #show: project.with( title: "Extrema Multivariable function", authors: ( "<NAME>", ), date: "30 Octobre, 2023", ) #set heading(numbering: "1.1.") = Extrema Multivariable function <extrema-multivariable-function> == Definition <definition> The extrema of a multivariable function are the maximum and minimum values of the function over a given domain. In other words, they are the points at which the function takes on its highest and lowest values. In order to find the extrema of a multivariable function, we need to first find the critical points of the function, which are the points at which the partial derivatives of the function are zero or undefined. We can then evaluate the function at each of these critical points to find the maximum and minimum values. For example, consider the function $f lr((x comma y)) eq x^2 plus y^2$. The partial derivatives of this function are: $ frac(diff f, diff x) eq 2 x $ $ frac(diff f, diff y) eq 2 y $ The critical points of the function are the points where the partial derivatives are zero, which are the points $lr((0 comma 0))$ and $lr((0 comma 0))$. Evaluating the function at these points gives us $f lr((0 comma 0)) eq 0$ and $f lr((0 comma 0)) eq 0$, so the maximum and minimum values of the function are $0$. In general, finding the extrema of a multivariable function can be more complex than in the one-dimensional case, as the function may have multiple critical points and the extrema may not be unique. However, the tools of multivariable calculus can be used to analyze the behavior of the function near these critical points and to determine the maximum and minimum values. == Links <links> - #link("Partial Derivative.pdf")[Partial Derivative] - #link("Critical Points.pdf")[Critical Points]
https://github.com/Fr4nk1inCs/typreset
https://raw.githubusercontent.com/Fr4nk1inCs/typreset/master/src/bundles/font.typ
typst
MIT License
#import "../utils/font.typ": set-font
https://github.com/rice8y/cetzuron
https://raw.githubusercontent.com/rice8y/cetzuron/main/docs/ae/sample_ae.typ
typst
#import "@local/cetzuron:0.1.0": * #set page(width: auto, height: auto) #set text(lang: "ja", font: "TeX Gyre Termes", size: 10pt) #show regex("[\p{scx:Han}\p{scx:Hira}\p{scx:Kana}]"): set text(lang: "ja", font: "<NAME>", size: 10pt) #figure( ae(5, 3), caption: [ラベル表示 Ver. (short)] ) #figure( ae(5, 3, style: "full"), caption: [ラベル表示 Ver. (full)] ) #figure( ae(4, 2, style: "full", label: false), caption: [ラベル非表示 Ver. (full)] )
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/visualize/shape-square_02.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page // Test relative-sized child. #square(fill: eastern)[ #rect(width: 10pt, height: 5pt, fill: conifer) #rect(width: 40%, height: 5pt, stroke: conifer) ]
https://github.com/DashieTM/nix-introduction
https://raw.githubusercontent.com/DashieTM/nix-introduction/main/utils.typ
typst
#import "@preview/polylux:0.3.1": * #import "@preview/codelst:2.0.1": sourcecode #let regular_page_design() = [ #align( right + top, [ #v(10pt) #rotate( 0deg, [#pad( right: 10pt, image("./figures/nix.png", width: 50pt, height: 50pt), )], ) ], ) ]; #let default_footer() = [ #v(15pt) #columns(2, [ #colbreak() #align(right + bottom, [ #set text(size: 20pt) #counter(page).display("1 of 1", both: true) ]) ]) ]; #let img(name, width: auto, height: auto, fit: "cover") = [ #image("./figures/" + name, width: width, height: height, fit: fit) // #image("\\figures\\" + name, width: width, height: height, fit: fit) ] #let subtitle_slide(title, level: auto) = [ #polylux-slide[ #align(center + horizon, heading(level: level, [ #title #v(35pt) ])) ] ] #let custom_heading(num, use_line, level, name: "", custom_tag: "", al: left) = { let concat_name = str(name.replace(" ", "")) let concat_name = str(concat_name.replace("(", "")) let concat_name = str(concat_name.replace(")", "")) if custom_tag != "" { locate( loc => { let elem = query(heading.where(body: [#custom_tag]), loc) if elem == () { align(left, [#heading(numbering: num, level: level, name)#label(custom_tag)]) } else { align(left, [#heading(numbering: num, level: level, name)]) } }, ) } else if num != "" and type(name) == type("string") { locate( loc => { let elem = query(heading.where(body: [#name]).before(loc), loc) if elem == () { align(al, [#heading(numbering: num, level: level, name)#label(concat_name)]) } else { align(al, [#heading(numbering: num, level: level, name)]) } }, ) } else { align(al, [#heading(numbering: num, level: level, name)]) } if use_line { line(length: 100%) } } #let section(num: "1.1.1", use_line: false, custom_tag: "", align: left, name) = { custom_heading(num, use_line, custom_tag: custom_tag, name: name, al, 1) } #let subsection(num: "1.1.1", use_line: false, custom_tag: "", align: left, name) = { custom_heading(num, use_line, custom_tag: custom_tag, name: name, al: align, 2) } #let subsubsection(num: "1.1.1", use_line: false, custom_tag: "", align: left, name) = { custom_heading(num, use_line, custom_tag: custom_tag, name: name, al: align, 3) } #let subsubsubsection(num: "1.1.1", use_line: false, custom_tag: "", align: left, name) = { custom_heading(num, use_line, custom_tag: custom_tag, name: name, al: align, 4) } #let subsubsubsubsection(num: "1.1.1", use_line: false, custom_tag: "", align: left, name) = { custom_heading(num, use_line, custom_tag: custom_tag, name: name, al: align, 5) } #let benefits(items) = { set list(marker: [+]) set text(fill: green) for item in items { list.item(item) } } #let negatives(items) = { set list(marker: [-]) set text(fill: red) for item in items { list.item(item) } }
https://github.com/topdeoo/NENU-Thesis-Typst
https://raw.githubusercontent.com/topdeoo/NENU-Thesis-Typst/master/pages/bachelor-cover.typ
typst
#import "../fonts/fonts.typ": font-family, font-size #import "../utils/color.typ": colorize #import "../utils/datetime.typ": datetime-display-cn-cover #import "../utils/format.typ": fakebold #let bachelor-cover( //! 从 `thesis` 中传入的参数 two-side: false, fonts: (:), info: (:), //! 封面的其余参数 stoke-width: 0.5pt, info-inset: (x: 0pt, bottom: 1pt), info-key-width: 72pt, info-key-font: "宋体", info-value-font: "宋体", info-col-gutter: -3pt, info-row-gutter: 5pt ) = { fonts = font-family + fonts info = ( title: ("毕业论文中文题目"), title-en: ("毕业论文英文题目"), student-id: "123456", author: "张三", department: "信息科学与技术学院", major: "计算机科学与技术", supervisor: "李四", submit-date: datetime.today(), ) + info if type(info.submit-date) == datetime { info.submit-date = datetime-display-cn-cover(info.submit-date) } let info-key(body) = { rect( width: 100%, inset: info-inset, stroke: none, text( font: fonts.at(info-key-font, default: "宋体"), size: font-size.小三, body, ), ) } let info-value(key, body) = { set align(center) rect( width: 100%, inset: info-inset, stroke: (bottom: stoke-width + black), text( font: fonts.at(info-value-font, default: "宋体"), size: font-size.三号, bottom-edge: "descender", body, ), ) } let info-long-value(key, body) = { grid.cell( colspan: 2, info-value( key, body, ), ) } //! 渲染 pagebreak( weak: true, to: if two-side { "odd" }, ) pad(left: 2em)[ #grid( columns: (1fr, 1fr), column-gutter: 9em, text(size: font-size.小四, font: fonts.宋体)[ #fakebold[学校代码:10200] ], text(size: font-size.小四, font: fonts.宋体)[ #fakebold[学号:#info.student-id] ], ) ] v(22pt) set align(center) //! 校徽 & 校名 & 类型 let nenu-logo = read("../assets/nenu-logo.svg") nenu-logo = colorize(nenu-logo, blue.darken(30%)) image.decode(nenu-logo, width: 90pt) pad(image("../assets/nenu-title.svg", width: 126pt), top: 0cm, bottom: -0.8cm) text(size: font-size.小一, font: fonts.黑体, weight: "medium")[本科毕业论文] v(30pt) //! 标题 // TODO 长标题填充 text(size: font-size.二号, font: fonts.隶书)[ #fakebold[#info.title] ] v(3pt) text(size: font-size.三号, font: fonts.宋体)[ #fakebold[#info.title-en] ] v(40pt) //! 作者信息 pad( left: 20pt, block( width: 318pt, grid( columns: (info-key-width, info-key-width, 1fr), column-gutter: info-col-gutter, row-gutter: info-row-gutter, info-key("学生姓名:"), info-long-value("author", info.author), info-key("指导教师:"), info-long-value("major", info.major), info-key("所在学院:"), info-long-value("department", info.department), info-key("所在专业:"), info-long-value("major", info.major), ), ), ) v(100pt) grid( rows: 2, row-gutter: 10pt, text(size: font-size.小三, font: fonts.宋体)[ 东北师范大学 ], text(size: font-size.小三, font: fonts.宋体)[ #info.submit-date ] ) }
https://github.com/ToguyC/typst-packages
https://raw.githubusercontent.com/ToguyC/typst-packages/main/unige-notes/1.0.0/lib.typ
typst
#import "@preview/cetz:0.2.2": canvas, plot #import "@preview/fletcher:0.5.1" as fletcher: diagram, node, edge #let globalConfig(doc) = [ #set heading(numbering: "1.1") #set text(font: "New Computer Modern", size: 10pt) #show heading: it => { text(size: 14pt)[#it] v(1em) } #par(justify: true)[#doc] ] #let config( title: none, author: none, class: none, professor: none, semester: none, doc ) = { set align(center) set text(font: "New Computer Modern") let sem_title = if calc.even(semester) { "Spring" } else { "Fall" } + " " + datetime.today().display("[year]") set document(title: class + " Notes - " + sem_title) set page( header: context { if counter(page).get().first() > 7 { let elems = query(selector(heading).before(here())) let notes = smallcaps(class) if elems.len() == 0 { align(right, notes) } else { let body = elems.last().body sem_title + h(1fr) + notes + h(1fr) + body } } }, footer: context { let current = counter(page).get().first() if current > 6 { if calc.even(current) [ #text(size: 11pt, weight: "regular")[#counter(page).display("1")] #h(1fr) ] else [ #h(1fr) #text(size: 11pt, weight: "regular")[#counter(page).display("1")] ] } } ) v(1fr) text(size: 18pt, weight: "thin")[ #class \ Prof. #professor #sym.bar.h UNIGE ] v(10pt) "Notes by " + author v(10pt) "Computer Science Master " + sym.bar.h + " Semester " + str(semester) linebreak() sem_title v(1fr) set align(left) pagebreak() pagebreak() "This document is based on the work on <NAME>, a friend who studied at EPFL and wrote all of his classes notes using this method. To view the original repository, please visit the link bellow:" align(center)[#link("https://github.com/JoachimFavre/UniversityNotes")] "I made this document for my own use, but I thought that typed notes might be of interest to others. There are mistakes, it is impossible not to make any. If you find some, please feel free to share them with me (grammatical and vocabulary errors are of course also welcome). You can contact me at the following e-mail address:" align(center)[#text(font: "PT Mono", size: 9pt)[#link("mailto:<EMAIL>")]] "If you did not get this document through my GitHub repository, then you may be interested by the fact that I have one on which I put those typed notes and their \LaTeX{} code. Here is the link (make sure to read the README to understand how to download the files you're interested in):" align(center)[#link("https://github.com/ToguyC/Computer-Science-Master-Notes")] "Please note that the content does not belong to me. I have made some structural changes, reworded some parts, and added some personal notes; but the wording and explanations come mainly from the Professor, and from the book on which they based their course." "Since you are reading this, I will give you a little advice. Sleep is a much more powerful tool than you may imagine, so do not neglect a good night of sleep in favour of studying (especially the night before an exam). I wish you to have fun during your exams." v(1fr) align(center)[#text(style: "italic")[Version 2024-10-03]] v(1fr) pagebreak() pagebreak() v(1fr) grid( columns: (1fr, 0.5fr, 1fr), "", "", text(style: "italic")[To <NAME>, whose work has inspired me this note taking method. \ \ Rest in peace, nobody deserves to go so young.] 
) v(1fr) pagebreak() pagebreak() outline(title: [Table of Contents], indent: 2em) pagebreak() globalConfig(doc) pagebreak() } #let chapterHeader(date: none, course: ()) = { grid( columns: (1fr, auto), align: horizon + center, column-gutter: 5pt, line(length: 100%, stroke: rgb("#004A7F")), text(fill: rgb("#004A7F"), size: 10pt)[#date.display("[weekday] [day] [month repr:long] [year]") #sym.bar.h #text(weight: "bold")[Cours #course.number : #course.name]] ) } #let parag(title: none, body) = { grid( columns: (3.2cm, auto), column-gutter: 0.3cm, text(size: 10pt)[*#title*], body ) } #let subparag(title: none, body) = { grid( columns: (2cm, auto), column-gutter: 0.3cm, grid.vline(x: 0, position: left, stroke: 0.5pt), pad(left: 0.3cm, text(size: 8pt)[_ #title _]), body ) } #let important(body) = { text(weight: "bold", fill: rgb("#004A7F"))[#body] } #let langle = sym.angle.l #let rangle = sym.angle.r #let frame() = (x, y) => ( left: 0.5pt, right: 0.5pt, top: if y <= 1 { 0.5pt } else { 0pt }, bottom: 0.5pt, ) #let foldl1(a, f) = a.slice(1).fold(a.first(), f) #let concat(a) = foldl1(a, (acc, x) => acc + x) #let nonumber(e) = math.equation(block: true, numbering: none, e) #let eq(es, numberlast: false) = if es.has("children") { let esf = es.children.filter(x => x != [ ]) let bodyOrChildren(e) = if e.body.has("children") { concat(e.body.children) } else { e.body } let hideEquation(e) = if e.has("numbering") and e.numbering == none { nonumber(hide(e)) } else [ $ #hide(bodyOrChildren(e)) $ #{if e.has("label") { e.label }} ] let hidden = box(concat( if numberlast { esf.slice(0, esf.len()-1).map(e => nonumber(hide(e))) + (hideEquation(esf.last()),) } else { esf.map(e => hideEquation(e)) })) let folder(acc, e) = acc + if acc != [] { linebreak() } + e let aligned = math.equation(block: true, numbering: none, esf.fold([], folder)) hidden style(s => v(-measure(hidden, s).height, weak: true)) aligned }
https://github.com/typst-community/valkyrie
https://raw.githubusercontent.com/typst-community/valkyrie/main/tests/logical/test.typ
typst
Other
#import "/src/lib.typ" as z #import "/tests/utility.typ": * #show: show-rule.with(); = logical/either == Input types #{ let schema = z.either(z.email(), z.ip()) let input-types = ( "ip (1.1.1.1)": "1.1.1.1", "email": "<EMAIL>", ) for (name, value) in input-types { utility-expect-eq( test: value, schema: schema, truth: value, )([It should validate #name]) } } #{ let schema = z.either( strict: true, z.dictionary(( seed: z.integer(), )), z.dictionary(( dynamic: z.boolean(), )), ) z.parse( (dynamic: false), schema, ) }
https://github.com/npikall/typst-templates
https://raw.githubusercontent.com/npikall/typst-templates/main/templates/thesis-bui.typ
typst
// Thesis Template for Civil- und Environmentalengineers at TU Wien #let conf( title:none, language:"en", doc ) = { // set the Pagelayout set page( paper:"a4", margin: ( left: 2.5cm, right: 2.5cm, top: 2.5cm, bottom: 2.5cm ), numbering: "1", header: context{ if counter(page).get().first() > 1 [ #set text(10pt) #h(1fr) #emph(title) #line(length: 100%, stroke: 0.7pt)]} , header-ascent: 30%, ) // Set the Heading Numbering set heading(numbering: "1.") // Set the equation numbering set math.equation(numbering: "(1)") // Set the paragraph style set par(leading: 0.55em, first-line-indent: 1.8em, justify: true) // Set the font style let mainfont = "CMU Serif"//"New Computer Modern" let titlefont = "CMU Sans Serif" //"Dejavu Sans" set text( font: mainfont, size: 11pt ) // Set Heading Style show heading: it => [ #set text(font: titlefont) #block(it) ] // Raw text and Spaces show raw: set text(font: "DejaVu Sans Mono") show par: set block(above: 1.4em, below: 1em) // Set Table style set table( stroke: none, gutter: auto, fill: none, inset: (right: 1.5em), ) // Referencing Figures show figure.where(kind: table): set figure(supplement:[Tab.], numbering: "1") if language == "aut" show figure.where(kind: image): set figure(supplement:[Abb.], numbering: "1",) if language == "aut" // List indentation set list(indent: 1em) set enum(indent: 1em) // Table of Contents Style show outline.entry.where( level: 1, ): it => { v(11pt, weak: true) text(font:titlefont, size:11pt ,[ #strong(it.body) #box(width: 1fr, repeat[]) #strong(it.page) ])} show outline.entry.where( level: 2, ): it => { it.body box(width: 1fr, repeat[.]) it.page} show outline.entry.where( level: 3, ): it => { it.body box(width: 1fr, repeat[.]) it.page } doc } // Make the Titleblock #let maketitle( thesis-type: [Bachelorthesis], title:[The Bachelorthesis Title], author:[<NAME>], date:datetime.today().display("[day].[month].[year]"), email:[<EMAIL>], matrnr:[12345678], abstract:[#lorem(60)], language:"en", ) = { let mainfont = "CMU Serif"//"New Computer Modern" let titlefont = "CMU Sans Serif" //"Dejavu Sans" // Printing the title // define title printing function let print_title( abstract_name:[], matrnr_name:[], date_name:[], ) = { align(center,[ #v(3em) #text(18pt, font: titlefont)[#thesis-type] #v(.5em) #text(18pt, font: titlefont)[*#title*] #v(1.2em) #text(10pt, font: titlefont)[#author] #v(0.5em) #text(10pt, font: titlefont)[ #email\ #matrnr_name: #matrnr\ #date_name: #date] #v(1em) #box(width: 85%, [ #text(11pt, font: titlefont)[#strong[#abstract_name]]\ #text(11pt, font: mainfont)[#abstract] #v(1em)]) ])} // print title depending on language if language == "aut" [ #print_title( abstract_name: [Kurzfassung], date_name: [Datum], matrnr_name: [Matr.Nr.], )] else [ #print_title( abstract_name: [Abstract], date_name: [Date], matrnr_name: [Matr.Nr.],) ] }
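// Usage sketch (hypothetical, not part of the original file): a thesis
// built on this template would typically start like this.
//
//   #import "thesis-bui.typ": conf, maketitle
//   #show: doc => conf(title: [My Thesis Title], language: "en", doc)
//   #maketitle(
//     title: [My Thesis Title],
//     author: [Jane Doe],
//     matrnr: [01234567],
//     abstract: [A short abstract.],
//   )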
https://github.com/luiswirth/bsc-thesis
https://raw.githubusercontent.com/luiswirth/bsc-thesis/main/src/abstract.typ
typst
#page[
  = Abstract

  Finite Element Exterior Calculus (FEEC) is a mathematical framework for formulating the theory of the Finite Element Method in a more elegant and more general way. This thesis aims to develop an implementation of FEEC in Rust. So far there exists only one general library for FEEC on the internet, implemented in Haskell. Using Rust, we will achieve better performance and create a library that is actually useful for solving problems.

  The creation of this working implementation is not the only added value this thesis tries to generate. This thesis should also serve as a guidebook on how to create other similar implementations of FEEC in other programming languages or with different paradigms. In this regard, this work tries to lay out the necessary steps without relying too much on the vast and complicated mathematical framework that was created around FEEC. This thesis is more pragmatic and should appeal to a wider audience than the original books and papers on FEEC.
]
https://github.com/Lypsilonx/Game-of-Intrigue
https://raw.githubusercontent.com/Lypsilonx/Game-of-Intrigue/main/cards.typ
typst
#import "data.typ": * #import "render.typ": * #let render_card(type, value: none, illegal: false, color: none, supertitle: none) = { let has_supertitle = supertitle != none and display_supertitle let is_role = supertitle == "Role" set text(font: "Inter Tight", weight: "medium") box( width: card_width, height: card_height, radius: card_cut_radius, clip: true, )[ #align(center + horizon)[ #box( width: 100% - 1em, height: 100% - 1em, radius: card_radius - 0.5mm, stroke: 0.3em + if color == none { black } else { color }, inset: 0.5em )[ // Accessiblity stripes #let thickness = 0.3em #place( top + left, dy: 2em, dx: -1em )[ #if colors.contains(color) { for i in range(colors.position(e => e == color) + 1) { if (calc.rem(i, 2) == 0) { v(thickness * 2, weak: true) } box( width: 10%, height: thickness, fill: white ) v(thickness, weak: true) } } ] #place( bottom + right, dy: -2em, dx: 1em )[ #rotate(180deg)[ #if colors.contains(color) { for i in range(colors.position(e => e == color) + 1) { if (calc.rem(i, 2) == 0) { v(thickness * 2, weak: true) } box( width: 10%, height: thickness, fill: white ) v(thickness, weak: true) } } ] ] // Symbol #let symbol_name = type #if (is_role) { symbol_name = "Role" } #let symbol = icon(symbol_name, color: color, width: 2.5em, height: 2.5em, side_distance: 0em) #if (symbols.keys().contains(type) and symbols.at(type) != none) { place( top + left, dy: -0.2em )[ #symbol ] place( bottom + right, dy: 0.2em )[ #rotate(180deg)[ #symbol ] ] } // Value #if (value != none) { place( top + right, dy: 0.2em )[ #text( size: 2em, weight: "bold", )[ #value ] ] place( bottom + left, dy: -0.2em )[ #rotate(180deg)[ #text( size: 2em, weight: "bold", )[ #value ] ] ] } #let center_symbol_scale = if (is_role) {300%} else {500%} #grid( align: top + center, columns: 1, rows: (auto, auto, 1fr), [ #v(if (has_supertitle) {1em} else {2.5em}) #text( weight: "extrabold", size: 2.5em )[ #if (has_supertitle) { text(supertitle, size: 0.7em) v(-1em) } #if (type == "") { " " } else { type } ] #v(0.5em) ], // Center Symbol box( )[ #scale(origin: center + horizon, x: center_symbol_scale, y: center_symbol_scale, reflow: true)[ #rotate(0deg)[ #symbol ] ] ], // Description box( width: 100%, height: 100% - 1em, clip: true, )[ #place(top + center)[ #line() ] #align(horizon)[ #text( )[ #get_description(type, value, illegal, color, supertitle) ] ] #place(bottom + center)[ #line() ] ] ) // Illegal #place( center + horizon, dx: -0em, dy: 2.2em )[ #skew(-skew_angle)[ #rotate(-skew_angle, reflow: true)[ #if (illegal) { text( weight: "extrabold", size: 2.5em )[ #box( width: 100%, height: 1em, fill: white )[ #box( width: 100%, height: 0.8em, fill: red )[ #text( size: 0.35em, fill: white )[ #repeat("ILLEGAL") ] ] ] ] } ] ] ] ] ] ] } #let render_card_back(value: none, illegal: false, role: false) = { set text(font: "Inter Tight", weight: "medium") box( width: card_width, height: card_height, radius: card_cut_radius, clip: true, fill: if role {white} else {black}, )[ #align(center + horizon)[ #box( width: 100% - 1em, height: 100% - 1em, radius: card_radius - 0.5mm, stroke: if role { 0.3em + black } else { 0.2em + white }, inset: (left: 0.5em, right: 0.5em, top: -0.5em, bottom: -0.5em), clip: true )[ // Illegal #place( center + horizon, dx: -1.2em, )[ #v(-0.8em) #rotate(-skew_angle)[ #skew(-skew_angle)[ #text( weight: "extrabold", size: 0.358em )[ #set align(center + top) #let secret_gradient = if role { gradient.linear(..colors, angle: 45deg, relative: "parent")} else { white } #v(2.5em) #text(font: "Chivo Mono", fill: 
secret_gradient)[ #repeat("GAME OF INTRIGUE") #repeat("OF INTRIGUE GAME") #repeat("INTRIGUE GAME OF") #repeat("GAME OF INTRIGUE") #repeat("OF INTRIGUE GAME") #repeat("INTRIGUE GAME " + if (value == none or value == 0) {"OF"} else { if (value < 10) {"0"} + str(value)}) #repeat("GAME OF INTRIGUE") #repeat("OF INTRIGUE GAME") #repeat("INTRIGUE GAME OF") #repeat("GAME OF INTRIGUE") #repeat("OF INTRIGUE GAME") #repeat("INTRIGUE GAME OF") #repeat("GAME OF INTRIGUE") ] #v(2em, weak: true) #logo_text(color: if role {black} else {white}) #v(2em, weak: true) #text(font: "Chivo Mono", fill: secret_gradient)[ #repeat("GAME OF INTRIGUE") #repeat("OF INTRIGUE GAME") #repeat("INTRIGUE GAME OF") #repeat(if (illegal) {"GAME OF ILLEGALE"} else {"GAME OF INTRIGUE"}) #repeat("OF INTRIGUE GAME") #repeat("INTRIGUE GAME OF") #repeat("GAME OF INTRIGUE") #repeat("OF INTRIGUE GAME") #repeat("INTRIGUE GAME OF") #repeat("GAME OF INTRIGUE") #repeat("OF INTRIGUE GAME") #repeat("INTRIGUE GAME OF") #repeat("GAME OF INTRIGUE") #repeat("OF INTRIGUE GAME") #repeat("INTRIGUE GAME OF") ] ] ] ] ] ] ] ] } // Render #render(if sys.inputs.keys().contains("render_type") {sys.inputs.render_type} else {"single"}, render_card, render_card_back)
https://github.com/ClazyChen/Table-Tennis-Rankings
https://raw.githubusercontent.com/ClazyChen/Table-Tennis-Rankings/main/history_CN/2018/MS-03.typ
typst
#set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (1 - 32)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [1], [马龙], [CHN], [3652], [2], [樊振东], [CHN], [3485], [3], [迪米特里 奥恰洛夫], [GER], [3306], [4], [许昕], [CHN], [3298], [5], [蒂姆 波尔], [GER], [3243], [6], [水谷隼], [JPN], [3211], [7], [林高远], [CHN], [3207], [8], [张继科], [CHN], [3124], [9], [方博], [CHN], [3098], [10], [周雨], [CHN], [3097], [11], [闫安], [CHN], [3094], [12], [郑荣植], [KOR], [3021], [13], [乔纳森 格罗斯], [DEN], [3003], [14], [梁靖崑], [CHN], [2988], [15], [李尚洙], [KOR], [2971], [16], [黄镇廷], [HKG], [2959], [17], [上田仁], [JPN], [2958], [18], [马克斯 弗雷塔斯], [POR], [2946], [19], [雨果 卡尔德拉诺], [BRA], [2944], [20], [夸德里 阿鲁纳], [NGR], [2942], [21], [吉田雅己], [JPN], [2938], [22], [西蒙 高兹], [FRA], [2929], [23], [松平健太], [JPN], [2929], [24], [巴斯蒂安 斯蒂格], [GER], [2925], [25], [于子洋], [CHN], [2925], [26], [#text(gray, "吉田海伟")], [JPN], [2913], [27], [弗拉基米尔 萨姆索诺夫], [BLR], [2892], [28], [利亚姆 皮切福德], [ENG], [2892], [29], [特里斯坦 弗洛雷], [FRA], [2892], [30], [丹羽孝希], [JPN], [2887], [31], [SHIBAEV Alexander], [RUS], [2886], [32], [丁祥恩], [KOR], [2884], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (33 - 64)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [33], [吉村真晴], [JPN], [2880], [34], [王楚钦], [CHN], [2880], [35], [庄智渊], [TPE], [2875], [36], [徐晨皓], [CHN], [2871], [37], [基里尔 格拉西缅科], [KAZ], [2866], [38], [张本智和], [JPN], [2866], [39], [LI Ping], [QAT], [2861], [40], [朴申赫], [PRK], [2861], [41], [森园政崇], [JPN], [2851], [42], [朱霖峰], [CHN], [2849], [43], [刘丁硕], [CHN], [2845], [44], [艾曼纽 莱贝松], [FRA], [2839], [45], [博扬 托基奇], [SLO], [2839], [46], [张禹珍], [KOR], [2837], [47], [帕特里克 弗朗西斯卡], [GER], [2833], [48], [克里斯坦 卡尔松], [SWE], [2831], [49], [#text(gray, "陈卫星")], [AUT], [2826], [50], [KIM Donghyun], [KOR], [2826], [51], [卢文 菲鲁斯], [GER], [2825], [52], [林钟勋], [KOR], [2821], [53], [奥马尔 阿萨尔], [EGY], [2818], [54], [吉村和弘], [JPN], [2813], [55], [大岛祐哉], [JPN], [2809], [56], [马蒂亚斯 法尔克], [SWE], [2806], [57], [薛飞], [CHN], [2797], [58], [GERELL Par], [SWE], [2797], [59], [#text(gray, "李廷佑")], [KOR], [2791], [60], [帕纳吉奥迪斯 吉奥尼斯], [GRE], [2786], [61], [ROBLES Alvaro], [ESP], [2781], [62], [雅克布 迪亚斯], [POL], [2780], [63], [廖振珽], [TPE], [2777], [64], [周恺], [CHN], [2777], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (65 - 96)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [65], [安德烈 加奇尼], [CRO], [2776], [66], [ZHAI Yujia], [DEN], [2772], [67], [WALTHER Ricardo], [GER], [2765], [68], [贝内迪克特 杜达], [GER], [2764], [69], [达科 约奇克], [SLO], [2762], [70], [村松雄斗], [JPN], [2761], [71], [王臻], [CAN], [2754], [72], [斯特凡 菲格尔], [AUT], [2754], [73], [HO Kwan Kit], [HKG], [2753], [74], [蒂亚戈 阿波罗尼亚], [POR], [2752], [75], [陈建安], [TPE], [2747], [76], [赵胜敏], [KOR], [2746], [77], [木造勇人], [JPN], [2746], [78], [OUAICHE Stephane], [ALG], [2746], [79], [KOU Lei], [UKR], [2744], [80], [WANG Zengyi], [POL], [2743], [81], [MONTEIRO Joao], [POR], [2742], [82], [<NAME>], [IND], [2741], [83], [#text(gray, "MATTENET Adrien")], [FRA], [2735], [84], [<NAME>], [JPN], [2733], [85], [林昀儒], [TPE], [2733], [86], [TAKAKIWA Taku], [JPN], [2732], [87], [汪洋], [SVK], [2730], [88], [周启豪], [CHN], [2722], [89], [及川瑞基], [JPN], [2720], [90], [沙拉特 卡马尔 阿昌塔], [IND], [2718], [91], [PERSSON Jon], [SWE], [2714], [92], [#text(gray, "WANG Xi")], [GER], [2712], [93], [詹斯 伦德奎斯特], [SWE], [2710], [94], [高宁], [SGP], [2709], [95], [TAZOE Kenta], [JPN], [2707], [96], [HABESOHN Daniel], [AUT], [2705], ) )#pagebreak() #set text(font: ("Courier New", 
"NSimSun")) #figure( caption: "Men's Singles (97 - 128)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [97], [托米斯拉夫 普卡], [CRO], [2701], [98], [PARK Ganghyeon], [KOR], [2696], [99], [MATSUYAMA Yuki], [JPN], [2692], [100], [金珉锡], [KOR], [2691], [101], [奥维迪乌 伊奥内斯库], [ROU], [2690], [102], [诺沙迪 阿拉米扬], [IRI], [2688], [103], [TREGLER Tomas], [CZE], [2687], [104], [KANG Dongsoo], [KOR], [2683], [105], [#text(gray, "ELOI Damien")], [FRA], [2683], [106], [罗伯特 加尔多斯], [AUT], [2681], [107], [#text(gray, "FANG Yinchi")], [CHN], [2681], [108], [TSUBOI Gustavo], [BRA], [2680], [109], [宇田幸矢], [JPN], [2680], [110], [SALIFOU Abdel-Kader], [FRA], [2675], [111], [ROBINOT Quentin], [FRA], [2674], [112], [LIVENTSOV Alexey], [RUS], [2674], [113], [LAM Siu Hang], [HKG], [2673], [114], [安东 卡尔伯格], [SWE], [2672], [115], [NG Pak Nam], [HKG], [2668], [116], [MATSUDAIRA Kenji], [JPN], [2666], [117], [TAKAMI Masaki], [JPN], [2666], [118], [江天一], [HKG], [2663], [119], [阿德里安 克里桑], [ROU], [2661], [120], [安宰贤], [KOR], [2656], [121], [AKKUZU Can], [FRA], [2653], [122], [#text(gray, "RYUZAKI Tonin")], [JPN], [2644], [123], [SZOCS Hunor], [ROU], [2642], [124], [神巧也], [JPN], [2642], [125], [LANDRIEU Andrea], [FRA], [2640], [126], [<NAME>], [FIN], [2638], [127], [WALKER Samuel], [ENG], [2638], [128], [SAKAI Asuka], [JPN], [2638], ) )
https://github.com/mgoulao/IST-MSc-Thesis-Typst-Template
https://raw.githubusercontent.com/mgoulao/IST-MSc-Thesis-Typst-Template/main/README.md
markdown
# IST-MSc-Thesis-Typst-Template
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/string-10.typ
typst
Other
// Test the `contains` method. #test("abc".contains("b"), true) #test("b" in "abc", true) #test("1234f".contains(regex("\d")), true) #test(regex("\d") in "1234f", true) #test("abc".contains("d"), false) #test("1234g" in "1234f", false) #test("abc".contains(regex("^[abc]$")), false) #test("abc".contains(regex("^[abc]+$")), true)
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/bugs/columns-1_00.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page #set page(height: 70pt) Hallo #columns(2)[ = A Text = B Text ]
https://github.com/loqusion/typix
https://raw.githubusercontent.com/loqusion/typix/main/docs/api/derivations/common/typst-opts-example.md
markdown
MIT License
<!-- markdownlint-disable-file first-line-h1 --> <!-- ANCHOR: head --> ```nix { format = "png"; ppi = 300; } ``` ...will result in a command like: <!-- ANCHOR_END: head --> <!-- ANCHOR: typstcompile --> ```sh typst compile --format png --ppi 300 <source> <output> ``` <!-- ANCHOR_END: typstcompile --> <!-- ANCHOR: typstwatch --> ```sh typst watch --format png --ppi 300 <source> <output> ``` <!-- ANCHOR_END: typstwatch -->
https://github.com/nathanielknight/tsot
https://raw.githubusercontent.com/nathanielknight/tsot/main/src/phase_prelude.typ
typst
#import("utils_phase.typst") #import("util.typst") #import "theme.typst" #show: theme.common #show: theme.phase #utils_phase.title("The Prelude") = Instructions In this phase you'll introduce the characters, their mission, and the setting. - Make moves from your playbook or from this sheet - Once you've made a move check it off; you can't make it again - Answer the questions in each move with a beat in the film. This can be a few shots, a montage or a whole scene. - You can always ask other players for input, ask that their character be in your scene, or ask them to play an extra character. - Make moves until there are none left or until it makes sense to move to the Encounter Phase = Moves #util.checklist[ - *What kind of crew are the characters in?* Choose a concept and a trait from the Crew Elements sheet, or make up your own. How are the crew's specialties, dynamics, and general vibe communicated to the audience? - *What kind of mission are the characters on?* Choose a concept and a trait from the Mission Elements sheet, or make up your own. How are the mission's objectives and stakes communicated to the audience? - *Where is the mission taking place?* Choose a concept and a trait from the Mission Elements sheet, or make up your own. How is the setting and its challenges, dangers, etc. communicated to the audience? - *Extra colour* _(4+ players only)_ Add a trait to the Mission, Crew, or Setting, choosing from the Elements sheets or making up your own. How is this additional detail communicated to the audience? ]
https://github.com/drupol/master-thesis
https://raw.githubusercontent.com/drupol/master-thesis/main/resources/typst/ch3-table-conclusion.typ
typst
Other
#set text( size: .80em, hyphenate: true, ) #set par(justify: true) #set text() #table( columns: 6, stroke: none, align: (right,) + (left,) * 2 + (center,) * 4, table.header( table.cell(rowspan: 2, align: horizon + center)[], table.vline(stroke: .5pt), table.cell(rowspan: 2, align: horizon + center)[Pros], table.vline(stroke: .5pt), table.cell(rowspan: 2, align: horizon + center)[Cons], table.vline(stroke: .5pt), table.cell(colspan: 3)[Reproducible\ within the same\ hardware architecture], [In space], table.vline(stroke: .5pt + black.lighten(75%)), [In time], table.vline(stroke: .5pt + black.lighten(75%)), [Environment], table.hline(stroke: .5pt), ), table.cell(align: horizon + left)[1. #link(<ch3-tool1>)[Bare\ compilation]], [ - Full control over compilation - Direct understanding of dependencies inherited from host system ], [ - Prone to #emph["it works on my machine"] issues - Lacks isolation and dependency management ], table.cell(align: horizon + center, text(size: 2em)[\u{00D7}]), table.cell(align: horizon + center, text(size: 2em)[\u{00D7}]), table.cell(align: horizon + center, text(size: 2em)[\u{00D7}]), table.hline(stroke: .5pt + black.lighten(75%)), table.cell(align: horizon + left)[2. #link(<ch3-tool2>)[Docker]], [ - Better isolation and dependency management thanks to containerization - Isolation from host system - Popular solution, widely adopted ], [ - Potential variability due to base images and package management - Additional layer of abstraction due to containerization ], table.cell(align: horizon + center, text(size: 2em)[\u{223C}]), table.cell(align: horizon + center, text(size: 2em)[\u{223C}]), table.cell(align: horizon + center, text(size: 2em)[\u{223C}]), table.hline(stroke: .5pt + black.lighten(75%)), table.cell(align: horizon + left)[3. #link(<ch3-tool3>)[Guix]], table.cell(align: left + horizon)[ - Deterministic builds with explicit dependency specification - Functional package management - Immutable software environments - Isolation and environment reproducibility - No containerization overhead - Strong long-term reproducibility ], [ - Steep learning curve - Paradigm shift from traditional package management systems required - Limited repository of packages ], table.cell(align: horizon + center, rowspan: 2, text(size: 2em)[\u{2713}]), table.cell(align: horizon + center, rowspan: 2, text(size: 2em)[\u{2713}]), table.cell(align: horizon + center, rowspan: 2, text(size: 2em)[\u{2713}]), table.hline(stroke: .5pt + black.lighten(75%)), table.cell(align: horizon + left)[4. #link(<ch3-tool4>)[Nix]], table.cell(align: left + horizon)[ - Deterministic builds with explicit dependency specification - Functional package management - Immutable software environments - Isolation and environment reproducibility - No containerization overhead - Vast repository of packages ], [ - Steep learning curve - Paradigm shift from traditional package management systems required ], table.footer( table.cell( align: right, colspan: 6, text(size: .7em)[ Legend: \u{2713} = Supported, \u{223C} = Partially supported, \u{00D7} = Not supported ], ), ), )
https://github.com/Omar-Majdoub/lab3
https://raw.githubusercontent.com/Omar-Majdoub/lab3/main/lab3/LAB3.typ
typst
#import "@preview/charged-ieee:0.1.0": ieee #set page(footer: context [ ISET BIZERTE #h(1fr) #counter(page).display( "1/1", both: true, ) ]) #show: ieee.with( title: [#text(smallcaps("Lab #3: Web Application with Genie"))], authors: ( ( name: "<NAME>", department: [Dept. of EE (AII21)], organization: [ISET Bizerte --- Tunisia], email: "<EMAIL>" ), ), index-terms: ("Scientific writing", "Typesetting", "Document creation", "Syntax"), bibliography: bibliography("refs.bib"), ) = Introduction In this lab,i createD a basic web application using *Genie framework* in *Julia*. The application will allow us to control the behaviour of a sine wave, given some adjustble parame- ters #figure( image("50237769.png", width: 50%, fit: "contain"), caption: "Genie framework" ) <fig:repl> = Exercise Were going to add two extra sliders that modify the behaviour of the sine wave graph: *- First* : adding a slide that will modify the Phase _Phase_ ranging between $-pi$ and $pi$, changes by a step of $pi/100$ #figure( image("pha.png", width: 100%, fit: "contain"), caption: "Adding the phase function in julia" ) <fig:repl> #figure( image("phase.png", width: 100%, fit: "contain"), caption: "Adding slide for phase" ) <fig:repl> *- Second* : Adding a slide that will modify the offset _Offset_ varies from $-0.5$ to $1$, by a step of $0.1$.] #figure( image("off.png", width: 100%, fit: "contain"), caption: "Adding the offset function in julia" ) <fig:repl> #figure( image("offset.png", width: 100%, fit: "contain"), caption: "Adding slide for offset" ) <fig:repl> = Result *- Before* #figure( image("Capture d’écran 2024-05-05 122110.png", width: 100%, fit: "contain"), caption: "Before Adding the two slide" ) <fig:repl> *- After* #figure( image("courb.png", width: 100%, fit: "contain"), caption: "After Adding the two slide" ) <fig:repl>
https://github.com/lphoogenboom/typstThesisDCSC
https://raw.githubusercontent.com/lphoogenboom/typstThesisDCSC/master/typFiles/chapter.typ
typst
// !!!! // STUDENTS, DO NOT EDIT THIS FILE! // !!!! #import "../projectInfo.typ": student, report #let chapter( content: "Add a content argument to function call", chapterTitle: "Chapter Title", body ) = { let topMargin = 2.5cm+1.35cm show heading.where(depth: 1): it => block(it.body); set page( numbering: "1", header: [ #locate(loc => { let titlePage = counter(page).at(label(lower(chapterTitle))).first() let thisPage = counter(page).at(loc).first() set align(if calc.rem(thisPage, 2) == 0 { left } else { right }) if thisPage != titlePage {counter(page).display("1"); v(-9pt) ; line(length: 100%, stroke: 0.5pt)} else {} }) ], header-ascent: 21.4%, footer-descent: 9%, footer: [ #locate(loc => { let n = counter(page).at(loc).first() if calc.rem(n,2) == 0 { [ #stack(dir: ltr, text(font: "New Computer Modern Sans")[#student.name], align(right)[#text(font: "New Computer Modern Sans")[#report.type]]) ] } else { [ #stack(dir: ltr, text(font: "New Computer Modern Sans")[#report.type], align(right)[#text(font: "New Computer Modern Sans")[#student.name]]) ] } }) ] ) set align(top) v(116pt-topMargin+3.02cm) line(length: 100%, stroke: 2pt) let pad = 5pt v(pad) align(top+right, [ #context{ let n = counter(heading).get().first()+1 [#text(size: 25pt, weight: 0, style: "normal",font: "New Computer Modern")[Chapter #n]] }]) v(pad) line(length: 100%, stroke: 2pt) v(-20pt) align( right, //[#text(size: 24.4pt, font: "New Computer Modern Sans", weight: "bold")[#chapterTitle ]#label(lower(chapterTitle))] text(size: 20pt, font: "New Computer Modern Sans")[#heading(outlined: true,[#chapterTitle])#label(lower(chapterTitle))] ) v(87pt) set text(size: 10.5pt,font: "New Computer Modern Math", weight: 500) [#content] pagebreak(to: "even", weak: true) body }
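// Usage sketch: a hypothetical student file might apply this template
// as shown below. The import path, title, and summary text are
// illustrative assumptions, not part of this template.
//
// #import "typFiles/chapter.typ": chapter
//
// #show: chapter.with(
//   chapterTitle: "Introduction",
//   content: [A one-paragraph summary shown under the chapter heading.],
// )
//
// Everything after the `#show` rule is typeset as the chapter body,
// with the running headers and footers defined above.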
https://github.com/goshakowska/Typstdiff
https://raw.githubusercontent.com/goshakowska/Typstdiff/main/README.md
markdown
# TypstDiff ### <NAME>, <NAME>, <NAME> ## Introduction A tool built with Pandoc to compare two Typst files. It marks content deleted from the first file and, with a different style, content added in the second file. ## Run documentation All information about the tool, how it works, and how to use it is located in the documentation written with mkdocs. To run the documentation server, use the command `mkdocs serve` in the folder `documentation`. If mkdocs is not installed, use the command `pip install mkdocs`, or run the virtual environment with `poetry shell` and install all dependencies with `poetry install` (poetry can be installed with `pip install poetry`). ### Issues As both tools - Pandoc and Typst - are new and still developing, there is no full support for Typst in Pandoc. Because of that, the tool cannot yet detect every change made in the files, but development is ongoing.
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/unichar/0.1.0/ucd/block-2440.typ
typst
Apache License 2.0
#let data = ( ("OCR HOOK", "So", 0), ("OCR CHAIR", "So", 0), ("OCR FORK", "So", 0), ("OCR INVERTED FORK", "So", 0), ("OCR BELT BUCKLE", "So", 0), ("OCR BOW TIE", "So", 0), ("OCR BRANCH BANK IDENTIFICATION", "So", 0), ("OCR AMOUNT OF CHECK", "So", 0), ("OCR DASH", "So", 0), ("OCR CUSTOMER ACCOUNT NUMBER", "So", 0), ("OCR DOUBLE BACKSLASH", "So", 0), )
https://github.com/rxt1077/it610
https://raw.githubusercontent.com/rxt1077/it610/master/markup/slides/buzz.typ
typst
#import "/templates/slides.typ": * #show: university-theme.with( short-title: [Buzzwords], ) #title-slide( title: [DevOps, SRE, and other Buzzwords], ) #focus-slide()[ What does a sysadmin do? ] #matrix-slide()[ Configure #licensed-image( file: "/images/configure-icon.svg", license: "CC0", title: [Settings SVG Vector], url: "https://www.svgrepo.com/svg/13688/settings", ) ][ Operate #licensed-image( file: "/images/operate-icon.svg", license: "CC0", title: [Press Button SVG Vector], url: "https://www.svgrepo.com/svg/58818/press-button", ) ][ Maintain #licensed-image( file: "/images/maintain-icon.svg", license: "CC0", title: [Wrench SVG Vector], url: "https://www.svgrepo.com/svg/535743/wrench", ) ] #alternate( title: [DevOps Patterns], image: licensed-image( file: "/images/devops.svg", license: "CC BY-SA 4.0", title: [Devops-toolchain.svg], url: "https://upload.wikimedia.org/wikipedia/commons/0/05/Devops-toolchain.svg", author: [Kharnagy], author-url: "https://commons.wikimedia.org/wiki/User:Kharnagy", ), text: [ - Hardware in the cloud - Everything is code - "YAML engineers" - Devs doing sysadmin or sysadmins doing dev? ], ) #alternate( title: [Site Reliability Engineering (SRE)], image: licensed-image( file: "/images/sre.jpg", license: "CC BY-NC-ND 4.0", title: [Service Reliability Hierarchy], url: "https://sre.google/sre-book/part-III-practices/#fig_part-practices_reliability-hierarchy", author: [<NAME>, <NAME>, <NAME>, <NAME>], author-url: "https://www.oreilly.com/library/view/site-reliability-engineering/9781491929117/", ), text: [ - Change management - Automation - CI/CD Pipelines - Testing - "Treat operations as if it's a software problem." -- Google ], ) #slide( title: [Is the Sysadmin Dead?], side-by-side(columns: (33%, 1fr))[ #v(1fr) #rect( stroke: 4pt, width: 60%, height: 60%, radius: ( top-left: 20%, top-right: 20% ), fill: gray, [ #v(20%) #h(20%) #text(size: 50pt, weight: "bold", [RIP \ ]) #h(10%) #text(size: 30pt, style: "italic", [sysadmin]) ] ) #v(1fr) ][ #v(1fr) - Nope, they're just coding more - Expertise in uptime and hardware optimization helps solve software problems - An empowered sysadmin can do more, much more - Experience matters #v(1fr) ] ) #alternate( title: [What does it all mean?], image: image("/images/sysadmin-heroes.svg"), text: [ - Failure is an option! - A whole lot of virtualization - Continuous deployment is expected - Rapid (automated) response - Downtime for maintenance is passe - Make it run on anything - Automate, automate, automate! - Save configurations, not images ], ) #focus-slide()[ What solutions are we using _now_? ] #matrix-slide(columns: 3, rows: 3)[ *Automation Tools* \ Ansible \ Terraform \ Chef/Puppet ][ *VMs* \ Amazon EC2 \ Digital Ocean \ Linode ][ *Containers* \ LXC \ K8s \ Podman ][ *Reproducible Systems* \ NixOS \ GUIX ][ *Function as a Service* \ AWS Lambda \ Google Cloud Functions \ Azure Functions ][ *Platform as a Service* \ Heroku \ Fly.io \ Railway.app ][ *Immutable Linux* \ ChromeOS \ Vanilla OS \ Endless OS ][ *Hybrid Cloud* \ OpenStack, K8s, etc. \ Any big IaaS ][ *Self-Hosted AI* ] #slide( title: [How do we keep up?], [ - We've talked about and used lots of tools, but they aren't the only ones - Concepts > Use Instances - Read, share, contribute - Learn from past mistakes - Don't believe the hype. Think for yourself! - Argue and learn - Insular sysadmin culture _is_ dead - My personal thoughts on the future: distributed, decentralized, federated, mesh ] )
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/bugs/place-spacing_01.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page #show place: set block(spacing: 4em) Paragraph before place. #place(rect()) Paragraph after place.
https://github.com/SillyFreak/typst-prequery
https://raw.githubusercontent.com/SillyFreak/typst-prequery/main/CHANGELOG.md
markdown
MIT License
# [unreleased](https://github.com/SillyFreak/typst-prequery/releases/tag/) ## Added ## Removed ## Changed ## Migration Guide from v0.1.X --- # [v0.1.0](https://github.com/SillyFreak/typst-prequery/releases/tag/v0.1.0) Initial Release
https://github.com/SillyFreak/typst-prequery
https://raw.githubusercontent.com/SillyFreak/typst-prequery/main/src/utils.typ
typst
MIT License
#let boolean-input(name) = { let bools = ("true": true, "false": false) let value = sys.inputs.at(name, default: "false") assert(value in bools, message: "--input " + name + "=... must be set to true or false if present") bools.at(value) }
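// Usage sketch: reading a boolean from the command line with this
// helper. The input name "draft" is a hypothetical example, not
// something this package defines.
//
// #import "utils.typ": boolean-input
// #let draft = boolean-input("draft")
//
// Compiled with `typst compile --input draft=true main.typ`, `draft`
// is true; without the `--input` flag it defaults to false.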
https://github.com/howardlau1999/sysu-thesis-typst
https://raw.githubusercontent.com/howardlau1999/sysu-thesis-typst/master/functions/helpers.typ
typst
MIT License
#let zip(..lists) = { let lists = lists.pos() if lists == () { () } else { let ret = () let len = lists.fold( lists.first().len(), (a, b) => if a > b.len() { b.len() } else { a } ) for i in range(0, len) { let curr = () for list in lists { curr.push(list.at(i)) } ret.push(curr) } ret } }
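// Usage sketch: `zip` pairs elements index by index and truncates to
// the shortest input; these calls are illustrative, not from the
// original file.
//
// #zip((1, 2, 3), ("a", "b"))   // yields ((1, "a"), (2, "b"))
// #zip((1, 2), (3, 4), (5, 6))  // yields ((1, 3, 5), (2, 4, 6))
// #zip()                        // yields ()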
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/compiler/block-05.typ
typst
Other
// Double block creates a scope. #{{ import "module.typ": b test(b, 1) }} // Error: 2-3 unknown variable: b #b
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/game-theoryst/0.1.0/doc/gallery/full-example.typ
typst
Apache License 2.0
#import "../../src/lib.typ": * #set page( width: auto, height: auto, margin: 0.25em ) #nfg( players: ([A\ Joe], [Bas Pro]), s1: ([$x$], [a]), s2: ("x", "aaaa", [$a$]), pad: ("x": 12pt, "y": 10pt), eliminations: ("s11", "s21", "s22"), ejust: ( s11: (x: (0pt, 36pt), y: (-3pt, -3.5pt)), s22: (x: (-10pt, -12pt), y: (-10pt, 10pt)), s21: (x: (-3pt, -9pt), y: (-10pt, 10pt)), ), mixings: (hmix: ($p$, $1-p$), vmix: ($q$, $r$, $1-q-r$)), custom-fills: (hp: maroon, vp: navy, hm: purple, vm: fuchsia, he: gray, ve: gray), [$0,vul(100000000)$], [$0,1$], [$0,0$], [$hul(1),1$], [$0, -1$], table.cell(fill: yellow.lighten(30%), [$hful(0),vful(0)$]) )
https://github.com/i-am-wololo/cours
https://raw.githubusercontent.com/i-am-wololo/cours/master/TP/i21/1/templates.typ
typst
#let project(title: "", body) = { set document(date: datetime.today(), author: "<NAME>") set page(numbering: "1/1", header: [ #smallcaps(title ) #smallcaps(text(datetime.today().display())) #h(1fr) Ecrit par Mehdi #line(length: 100%) ] ) set list(indent:3pt) set text(font: "RobotoMono Nerd Font") body } #let title(body) = { set align(center) text(body, style: "oblique") } #let exercices(body)={ show heading: set text(green) set text(font: "Hack Nerd Font Mono") body } #let placeholder()={ text(style: "italic", "Placeholder (Comming Soon)") } #let defcount = counter("definition") #let definition(txt, title: "") = block[ #set rect(radius:8pt) #rect[#defcount.step() *Définition #defcount.display(): #title* #txt ] ] #let chshtemplate(matiere: "", body) = { set text(font: "Hack Nerd Font", size: 8pt ) title("fiche de "+matiere+" de <NAME>") columns(2, body) } #let truthtable(body, sign) = { table(columns:(auto, auto, auto), ..body) } #let py(body) = { raw(lang:"py", body) }
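// Usage sketch: a hypothetical course note applying these helpers;
// the file name, title, and definition text are assumptions made for
// illustration only.
//
// #import "templates.typ": project, definition, py
//
// #show: project.with(title: "i21 TP 1")
// #definition([A set is a collection of distinct objects.], title: "Set")
// #py("print('hello')")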
https://github.com/RaphGL/ElectronicsFromBasics
https://raw.githubusercontent.com/RaphGL/ElectronicsFromBasics/main/DC/chap3/9_safe_meter_usage.typ
typst
Other
#import "../../core/core.typ" === Safe meter usage Using an electrical meter safely and efficiently is perhaps the most valuable skill an electronics technician can master, both for the sake of their own personal safety and for proficiency at their trade. It can be daunting at first to use a meter, knowing that you are connecting it to live circuits which may harbor life-threatening levels of voltage and current. This concern is not unfounded, and it is always best to proceed cautiously when using meters. Carelessness more than any other factor is what causes experienced technicians to have electrical accidents. The most common piece of electrical test equipment is a meter called the #emph[multimeter]. Multimeters are so named because they have the ability to measure a multiple of variables: voltage, current, resistance, and often many others, some of which cannot be explained here due to their complexity. In the hands of a trained technician, the multimeter is both an efficient work tool and a safety device. In the hands of someone ignorant and/or careless, however, the multimeter may become a source of danger when connected to a \"live\" circuit. There are many different brands of multimeters, with multiple models made by each manufacturer sporting different sets of features. The multimeter shown here in the following illustrations is a \"generic\" design, not specific to any manufacturer, but general enough to teach the basic principles of use: #image("static/00340.png") You will notice that the display of this meter is of the \"digital\" type: showing numerical values using four digits in a manner similar to a digital clock. The rotary selector switch (now set in the #emph[Off] position) has five different measurement positions it can be set in: two \"V\" settings, two \"A\" settings, and one setting in the middle with a funny-looking \"horseshoe\" symbol on it representing \"resistance.\" The \"horseshoe\" symbol is the Greek letter \"Omega\" (Ω), which is the common symbol for the electrical unit of ohms. Of the two \"V\" settings and two \"A\" settings, you will notice that each pair is divided into unique markers with either a pair of horizontal lines (one solid, one dashed), or a dashed line with a squiggly curve over it. The parallel lines represent \"DC\" while the squiggly curve represents \"AC.\" The \"V\" of course stands for \"voltage\" while the \"A\" stands for \"amperage\" (current). The meter uses different techniques, internally, to measure DC than it uses to measure AC, and so it requires the user to select which type of voltage (V) or current (A) is to be measured. Although we haven\'t discussed alternating current (AC) in any technical detail, this distinction in meter settings is an important one to bear in mind. There are three different sockets on the multimeter face into which we can plug our #emph[test leads]. Test leads are nothing more than specially-prepared wires used to connect the meter to the circuit under test. The wires are coated in a color-coded (either black or red) flexible insulation to prevent the user\'s hands from contacting the bare conductors, and the tips of the probes are sharp, stiff pieces of wire: #image("static/00341.png") The black test lead #emph[always] plugs into the black socket on the multimeter: the one marked \"COM\" for \"common.\" The red test lead plugs into either the red socket marked for voltage and resistance, or the red socket marked for current, depending on which quantity you intend to measure with the multimeter. 
To see how this works, let\'s look at a couple of examples showing the meter in use. First, we\'ll set up the meter to measure DC voltage from a battery: #image("static/00342.png") Note that the two test leads are plugged into the appropriate sockets on the meter for voltage, and the selector switch has been set for DC \"V\". Now, we\'ll take a look at an example of using the multimeter to measure AC voltage from a household electrical power receptacle (wall socket): #image("static/00343.png") The only difference in the setup of the meter is the placement of the selector switch: it is now turned to AC \"V\". Since we\'re still measuring voltage, the test leads will remain plugged in the same sockets. In both of these examples, it is #emph[imperative] that you not let the probe tips come in contact with one another while they are both in contact with their respective points on the circuit. If this happens, a short-circuit will be formed, creating a spark and perhaps even a ball of flame if the voltage source is capable of supplying enough current! The following image illustrates the potential for hazard: #image("static/00344.png") This is just one of the ways that a meter can become a source of hazard if used improperly. Voltage measurement is perhaps the most common function a multimeter is used for. It is certainly the primary measurement taken for safety purposes (part of the lock-out/tag-out procedure), and it should be well understood by the operator of the meter. Being that voltage is always relative between two points, the meter #emph[must] be firmly connected to two points in a circuit before it will provide a reliable measurement. That usually means both probes must be grasped by the user\'s hands and held against the proper contact points of a voltage source or circuit while measuring. Because a hand-to-hand shock current path is the most dangerous, holding the meter probes on two points in a high-voltage circuit in this manner is always a #emph[potential] hazard. If the protective insulation on the probes is worn or cracked, it is possible for the user\'s fingers to come into contact with the probe conductors during the time of test, causing a bad shock to occur. If it is possible to use only one hand to grasp the probes, that is a safer option. Sometimes it is possible to \"latch\" one probe tip onto the circuit test point so that it can be let go of and the other probe set in place, using only one hand. Special probe tip accessories such as spring clips can be attached to help facilitate this. Remember that meter test leads are part of the whole equipment package, and that they should be treated with the same care and respect that the meter itself is. If you need a special accessory for your test leads, such as a spring clip or other special probe tip, consult the product catalog of the meter manufacturer or other test equipment manufacturer. #emph[Do not] try to be creative and make your own test probes, as you may end up placing yourself in danger the next time you use them on a live circuit. Also, it must be remembered that digital multimeters usually do a good job of discriminating between AC and DC measurements, as they are set for one or the other when checking for voltage or current. As we have seen earlier, both AC and DC voltages and currents can be deadly, so when using a multimeter as a safety check device you should always check for the presence of both AC and DC, even if you\'re not expecting to find both! 
Also, when checking for the presence of hazardous voltage, you should be sure to check #emph[all] pairs of points in question. For example, suppose that you opened up an electrical wiring cabinet to find three large conductors supplying AC power to a load. The circuit breaker feeding these wires (supposedly) has been shut off, locked, and tagged. You double-checked the absence of power by pressing the #emph[Start] button for the load. Nothing happened, so now you move on to the third phase of your safety check: the meter test for voltage. First, you check your meter on a known source of voltage to see that it's working properly. Any nearby power receptacle should provide a convenient source of AC voltage for a test. You do so and find that the meter indicates as it should. Next, you need to check for voltage among these three wires in the cabinet. But voltage is measured between #emph[two] points, so where do you check? #image("static/00353.png") The answer is to check between all combinations of those three points. As you can see, the points are labeled \"A\", \"B\", and \"C\" in the illustration, so you would need to take your multimeter (set in the voltmeter mode) and check between points A & B, B & C, and A & C. If you find voltage between any of those pairs, the circuit is not in a Zero Energy State. But wait! Remember that a multimeter will not register DC voltage when it's in the AC voltage mode and vice versa, so you need to check those three pairs of points in #emph[each mode] for a total of six voltage checks in order to be complete! However, even with all that checking, we still haven\'t covered all possibilities yet. Remember that hazardous voltage can appear between a single wire and ground (in this case, the metal frame of the cabinet would be a good ground reference point) in a power system. So, to be perfectly safe, we not only have to check between A & B, B & C, and A & C (in both AC and DC modes), but we also have to check between A & ground, B & ground, and C & ground (in both AC and DC modes)! This makes for a grand total of twelve voltage checks for this seemingly simple scenario of only three wires. Then, of course, after we\'ve completed all these checks, we need to take our multimeter and re-test it against a known source of voltage such as a power receptacle to ensure that it's still in good working order. Using a multimeter to check for resistance is a much simpler task. The test leads will be kept plugged in the same sockets as for the voltage checks, but the selector switch will need to be turned until it points to the \"horseshoe\" resistance symbol. Touching the probes across the device whose resistance is to be measured, the meter should properly display the resistance in ohms: #image("static/00345.png") One very important thing to remember about measuring resistance is that it must only be done on #emph[de-energized] components! When the meter is in \"resistance\" mode, it uses a small internal battery to generate a tiny current through the component to be measured. By sensing how difficult it is to move this current through the component, the resistance of that component can be determined and displayed. If there is any additional source of voltage in the meter-lead-component-lead-meter loop to either aid or oppose the resistance-measuring current produced by the meter, faulty readings will result. In a worst-case situation, the meter may even be damaged by the external voltage. 
The \"resistance\" mode of a multimeter is very useful in determining wire continuity as well as making precise measurements of resistance. When there is a good, solid connection between the probe tips (simulated by touching them together), the meter shows almost zero Ω. If the test leads had no resistance in them, it would read exactly zero: #image("static/00346.png") If the leads are not in contact with each other, or touching opposite ends of a broken wire, the meter will indicate infinite resistance (usually by displaying dashed lines or the abbreviation \"O.L.\" which stands for \"open loop\"): #image("static/00347.png") By far the most hazardous and complex application of the multimeter is in the measurement of current. The reason for this is quite simple: in order for the meter to measure current, the current to be measured must be forced to go #emph[through] the meter. This means that the meter must be made part of the current path of the circuit rather than just be connected off to the side somewhere as is the case when measuring voltage. In order to make the meter part of the current path of the circuit, the original circuit must be \"broken\" and the meter connected across the two points of the open break. To set the meter up for this, the selector switch must point to either AC or DC \"A\" and the red test lead must be plugged in the red socket marked \"A\". The following illustration shows a meter all ready to measure current and a circuit to be tested: #image("static/00348.png") Now, the circuit is broken in preparation for the meter to be connected: #image("static/00349.png") The next step is to insert the meter in-line with the circuit by connecting the two probe tips to the broken ends of the circuit, the black probe to the negative (-) terminal of the 9-volt battery and the red probe to the loose wire end leading to the lamp: #image("static/00350.png") This example shows a very safe circuit to work with. 9 volts hardly constitutes a shock hazard, and so there is little to fear in breaking this circuit open (bare handed, no less!) and connecting the meter in-line with the flow of electrons. However, with higher power circuits, this could be a hazardous endeavor indeed. Even if the circuit voltage was low, the normal current could be high enough that an injurious spark would result the moment the last meter probe connection was established. Another potential hazard of using a multimeter in its current-measuring (\"ammeter\") mode is failure to properly put it back into a voltage-measuring configuration before measuring voltage with it. The reasons for this are specific to ammeter design and operation. When measuring circuit current by placing the meter directly in the path of current, it is best to have the meter offer little or no resistance against the flow of electrons. Otherwise, any additional resistance offered by the meter would impede the electron flow and alter the circuits operation. Thus, the multimeter is designed to have practically zero ohms of resistance between the test probe tips when the red probe has been plugged into the red \"A\" (current-measuring) socket. In the voltage-measuring mode (red lead plugged into the red \"V\" socket), there are many mega-ohms of resistance between the test probe tips, because voltmeters are designed to have close to infinite resistance (so that they #emph[don\'t] draw any appreciable current from the circuit under test). 
When switching a multimeter from current- to voltage-measuring mode, it's easy to spin the selector switch from the \"A\" to the \"V\" position and forget to correspondingly switch the position of the red test lead plug from \"A\" to \"V\". The result -- if the meter is then connected across a source of substantial voltage -- will be a short-circuit through the meter! #image("static/00351.png") To help prevent this, most multimeters have a warning feature by which they beep if ever there\'s a lead plugged in the \"A\" socket and the selector switch is set to \"V\". As convenient as features like these are, though, they are still no substitute for clear thinking and caution when using a multimeter. All good-quality multimeters contain fuses inside that are engineered to \"blow\" in the event of excessive current through them, such as in the case illustrated in the last image. Like all overcurrent protection devices, these fuses are primarily designed to #emph[protect the equipment] (in this case, the meter itself) from excessive damage, and only secondarily to protect the user from harm. A multimeter can be used to check its own current fuse by setting the selector switch to the resistance position and creating a connection between the two red sockets like this: #image("static/00352.png") A good fuse will indicate very little resistance while a blown fuse will always show \"O.L.\" (or whatever indication that model of multimeter uses to indicate no continuity). The actual number of ohms displayed for a good fuse is of little consequence, so long as it's an arbitrarily low figure. So now that we\'ve seen how to use a multimeter to measure voltage, resistance, and current, what more is there to know? Plenty! The value and capabilities of this versatile test instrument will become more evident as you gain skill and familiarity using it. There is no substitute for regular practice with complex instruments such as these, so feel free to experiment on safe, battery-powered circuits. #core.review[ - A meter capable of checking for voltage, current, and resistance is called a #emph[multimeter]. - As voltage is always relative between two points, a voltage-measuring meter (\"voltmeter\") must be connected to two points in a circuit in order to obtain a good reading. Be careful not to touch the bare probe tips together while measuring voltage, as this will create a short-circuit! - Remember to always check for both AC and DC voltage when using a multimeter to check for the presence of hazardous voltage on a circuit. Make sure you check for voltage between all pair-combinations of conductors, including between the individual conductors and ground! - When in the voltage-measuring (\"voltmeter\") mode, multimeters have very high resistance between their leads. - Never try to read resistance or continuity with a multimeter on a circuit that is energized. At best, the resistance readings you obtain from the meter will be inaccurate, and at worst the meter may be damaged and you may be injured. - Current-measuring meters (\"ammeters\") are always connected in a circuit so the electrons have to flow #emph[through] the meter. - When in the current-measuring (\"ammeter\") mode, multimeters have practically no resistance between their leads. This is intended to allow electrons to flow through the meter with the least possible difficulty. If this were not the case, the meter would add extra resistance in the circuit, thereby affecting the current. ]
https://github.com/maxgraw/bachelor
https://raw.githubusercontent.com/maxgraw/bachelor/main/apps/document/src/5-implementation/interaction.typ
typst
Now that the implementation of the interfaces has been presented, this section turns to the interaction with them. The implementation of the 3D cursor via the Hit Test API was already explained above. To interact with this cursor, based on the camera position and rotation, the corresponding function is called inside the Three.js render function. @figure-renderHitTest shows the integration of the hit-test function into the render function. #let code = ```js render(timestamp, frame) { if (frame) { this.hitTest(this.renderer, frame); } this.renderer.render(this.scene, this.camera); } ``` #figure( code, caption: [Integration of the "hitTest" function into the render function] ) <figure-renderHitTest> To enable interaction with all other interfaces, the application first waits for a select event from the controller. Here, the controller represents a touch event on the screen. When this event is triggered, the handleSelect method is called, as shown in @figure-eventListenerSelect. #let code = ```js this.controller.addEventListener("select", () => this.handleSelect()); ``` #figure( code, caption: [Event listener for the controller's "select" event] ) <figure-eventListenerSelect> The handleSelect function uses the data of the touch event to perform a raycast. A raycast works similarly to the Hit Test API and makes it possible to create and fire a virtual ray into the scene. If this ray intersects an object, that object is returned as the result of the raycast. The function uses several helper functions to implement this logic, as shown in @figure-handleSelectFirst. #let code = ```js export async function handleSelect() { this.raycaster.controller.setFromXRController(this.controller); let options = [...]; const data = this.raycast(this.raycaster.controller, options); const type = getType(data); } ``` #figure( code, caption: [Excerpt from the "handleSelect" function in @appendix-handleSelect] ) <figure-handleSelectFirst> As shown earlier in the user flow, the type of the tapped element is then determined and the corresponding interaction is executed. The function acts as the central control of the application and decides, through successive if statements, which interaction is carried out. @figure-handleSelectSecond shows another excerpt from the "handleSelect" function. After the type of the tapped element has been determined, the respective interaction is performed. In the case of a 2D interface, the type of menu is checked further, as already described in the user flow. The function uses the handleDisplay function to control the showing and hiding of elements. Because the entire interaction logic is collected in this function, the application can easily be adapted and extended with new interactions. #let code = ```js export async function handleSelect() { if (type.ui) { if (data.object.userData.isCreate) { handleUI(this, data); } if (data.object.userData.isRemove) { this.selection.remove(this.selected); } handleDisplay(this, { menu: { create: false, remove: false, }, selected: false, appendOptions: false, }); return; } } ``` #figure( code, caption: [Excerpt from the "handleSelect" function in @appendix-handleSelect] ) <figure-handleSelectSecond>
https://github.com/Myriad-Dreamin/typst.ts
https://raw.githubusercontent.com/Myriad-Dreamin/typst.ts/main/fuzzers/corpora/meta/outline_00.typ
typst
Apache License 2.0
#import "/contrib/templates/std-tests/preset.typ": * #show: test-page #set page("a7", margin: 20pt, numbering: "1") #set heading(numbering: "(1/a)") #show heading.where(level: 1): set text(12pt) #show heading.where(level: 2): set text(10pt) #set math.equation(numbering: "1") #outline() #outline(title: [Figures], target: figure) #outline(title: [Equations], target: math.equation) = Introduction #lorem(12) = Analysis #lorem(10) #[ #set heading(outlined: false) == Methodology #lorem(6) ] == Math $x$ is a very useful constant. See it in action: $ x = x $ == Interesting figures #figure(rect[CENSORED], kind: image, caption: [A picture showing a programmer at work.]) #figure(table[1x1], caption: [A very small table.]) == Programming ```rust fn main() { panic!("in the disco"); } ``` ==== Deep Stuff Ok ... // Ensure 'bookmarked' option doesn't affect the outline #set heading(numbering: "(I)", bookmarked: false) = #text(blue)[Sum]mary #lorem(10)
https://github.com/malramsay64/resume
https://raw.githubusercontent.com/malramsay64/resume/main/resume.typ
typst
#import "template.typ": * #let cvdata = toml("resume.toml") #let uservars = ( headingfont: "Roboto", bodyfont: "Open Sans", fontsize: 10pt, // 10pt, 11pt, 12pt awesomeColor: "concrete", linespacing: 6pt, showAddress: false, // true/false show address in contact info showNumber: true, // true/false show phone number in contact info headingsmallcaps: false ) // setrules and showrules can be overridden by re-declaring it here // #let setrules(doc) = { // // add custom document style rules here // // doc // } #let customrules(doc) = { // add custom document style rules here set page( paper: "a4", // a4, us-letter numbering: "1 / 1", number-align: right, // left, center, right ) doc } #let cvinit(doc) = { doc = setrules(uservars, doc) doc = showrules(uservars, doc) doc = customrules(doc) doc } // each section body can be overridden by re-declaring it here // #let cveducation = [] // ========================================================================== // #show: doc => cvinit(doc) #set page( footer: text(size: 9pt, fill: luma(80))[#cvdata.personal.name #h(1fr) #counter(page).display("1 / 1", both: true)] ) #cvheading(cvdata, uservars) #cvwork(cvdata, isbreakable: false) #cveducation(cvdata, isbreakable: false) #cvprojects(cvdata, isbreakable: false) // #cvaffiliations(cvdata) // #cvawards(cvdata) // #cvcertificates(cvdata) #cvpresentations(cvdata, isbreakable: false) #cvpublications(cvdata) // #cvskills(cvdata, isbreakable: false) // #cvreferences(cvdata) #endnote()
https://github.com/Quaternijkon/Typst_FLOW
https://raw.githubusercontent.com/Quaternijkon/Typst_FLOW/main/src/utils.typ
typst
#import "pdfpc.typ" #let _typst-builtin-numbering = numbering /// Add a dictionary to another dictionary recursively /// /// Example: `add-dicts((a: (b: 1), (a: (c: 2))` returns `(a: (b: 1, c: 2)` #let add-dicts(dict-a, dict-b) = { let res = dict-a for key in dict-b.keys() { if key in res and type(res.at(key)) == dictionary and type(dict-b.at(key)) == dictionary { res.insert(key, add-dicts(res.at(key), dict-b.at(key))) } else { res.insert(key, dict-b.at(key)) } } return res } /// Merge some dictionaries recursively /// /// Example: `merge-dicts((a: (b: 1)), (a: (c: 2)))` returns `(a: (b: 1, c: 2))` #let merge-dicts(init-dict, ..dicts) = { assert(dicts.named().len() == 0, message: "You must provide dictionaries as positional arguments") let res = init-dict for dict in dicts.pos() { res = add-dicts(res, dict) } return res } // ------------------------------------- // Slide counter // ------------------------------------- #let slide-counter = counter("touying-slide-counter") #let last-slide-counter = counter("touying-last-slide-counter") #let last-slide-number = context last-slide-counter.final().first() /// Get the progress of the current slide. /// /// - `callback` is the callback function `ratio => { .. }` to get the progress of the current slide. The `ratio` is a float number between 0 and 1. #let touying-progress(callback) = ( context { if last-slide-counter.final().first() == 0 { callback(1.0) return } let ratio = calc.min(1.0, slide-counter.get().first() / last-slide-counter.final().first()) callback(ratio) } ) // slide note state #let slide-note-state = state("touying-slide-note-state", none) #let current-slide-note = context slide-note-state.get() // ------------------------------------- // Saved states and counters // ------------------------------------- #let saved-frozen-states = state("touying-saved-frozen-states", ()) #let saved-default-frozen-states = state("touying-saved-default-frozen-states", ()) #let saved-frozen-counters = state("touying-saved-frozen-counters", ()) #let saved-default-frozen-counters = state("touying-saved-default-frozen-counters", ()) /// Remove leading and trailing empty elements from an array of content /// /// - `empty-contents` is a array of content that is considered empty /// /// Example: `trim(([], [ ], parbreak(), linebreak(), [a], [ ], [b], [c], linebreak(), parbreak(), [ ], [ ]))` returns `([a], [ ], [b], [c])` #let trim(arr, empty-contents: ([], [ ], parbreak(), linebreak())) = { let i = 0 let j = arr.len() - 1 while i != arr.len() and arr.at(i) in empty-contents { i += 1 } while j != i - 1 and arr.at(j) in empty-contents { j -= 1 } arr.slice(i, j + 1) } /// Add a label to a content /// /// Example: `label-it("key", [a])` is equivalent to `[a <key>]` /// /// - `it` is the content to label /// /// - `label-name` is the name of the label, or a label #let label-it(it, label-name) = { if type(label-name) == label { [#it#label-name] } else { assert(type(label-name) == str, message: repr(label-name)) [#it#label(label-name)] } } /// Reconstruct a content with a new body /// /// - `body-name` is the property name of the body field /// /// - `labeled` is a boolean indicating whether the fields should be labeled /// /// - `named` is a boolean indicating whether the fields should be named /// /// - `it` is the content to reconstruct /// /// - `new-body` is the new body you want to replace the old body with #let reconstruct(body-name: "body", labeled: true, named: false, it, ..new-body) = { let fields = it.fields() let label = fields.remove("label", 
default: none) let _ = fields.remove(body-name, default: none) if named { if label != none and labeled { return label-it((it.func())(..fields, ..new-body), label) } else { return (it.func())(..fields, ..new-body) } } else { if label != none and labeled { return label-it((it.func())(..fields.values(), ..new-body), label) } else { return (it.func())(..fields.values(), ..new-body) } } } /// Reconstruct a table-like content with new children /// /// - `named` is a boolean indicating whether the fields should be named /// /// - `it` is the content to reconstruct /// /// - `new-children` is the new children you want to replace the old children with #let reconstruct-table-like(named: true, labeled: true, it, new-children) = { reconstruct(body-name: "children", named: named, labeled: labeled, it, ..new-children) } #let typst-builtin-sequence = ([A] + [ ] + [B]).func() /// Determine if a content is a sequence /// /// Example: `is-sequence([a])` returns `true` #let is-sequence(it) = { type(it) == content and it.func() == typst-builtin-sequence } #let typst-builtin-styled = [#set text(fill: red)].func() /// Determine if a content is styled /// /// Example: `is-styled(text(fill: red)[Red])` returns `true` #let is-styled(it) = { type(it) == content and it.func() == typst-builtin-styled } /// Reconstruct a styled content with a new body /// /// - `it` is the content to reconstruct /// /// - `new-child` is the new child you want to replace the old body with #let reconstruct-styled(it, new-child) = { typst-builtin-styled(new-child, it.styles) } /// Determine if a content is a metadata /// /// Example: `is-metadata(metadata((a: 1)))` returns `true` #let is-metadata(it) = { type(it) == content and it.func() == metadata } /// Determine if a content is a metadata with a specific kind #let is-kind(it, kind) = { is-metadata(it) and type(it.value) == dictionary and it.value.at("kind", default: none) == kind } /// Determine if a content is a heading in a specific depth #let is-heading(it, depth: 9999) = { type(it) == content and it.func() == heading and it.depth <= depth } /// Call a `self => {..}` function and return the result, or just return the content #let call-or-display(self, it) = { if type(it) == function { it = it(self) } return [#it] } /// Wrap a function with a `self` parameter to make it a method /// /// Example: `#let hide = method-wrapper(hide)` to get a `hide` method #let method-wrapper(fn) = (self: none, ..args) => fn(..args) /// Assuming all functions in dictionary have a named `self` parameter, /// `methods` function is used to get all methods in dictionary object /// /// Example: `#let (uncover, only) = utils.methods(self)` to get `uncover` and `only` methods. 
#let methods(self) = { assert(type(self) == dictionary, message: "self must be a dictionary") assert("methods" in self and type(self.methods) == dictionary, message: "self.methods must be a dictionary") let methods = (:) for key in self.methods.keys() { if type(self.methods.at(key)) == function { methods.insert(key, (..args) => self.methods.at(key)(self: self, ..args)) } } return methods } // ------------------------------------- // Headings // ------------------------------------- /// Capitalize a string #let capitalize(s) = { assert(type(s) == str, message: "s must be a string") if s.len() == 0 { return s } let lowercase = lower(s) upper(lowercase.at(0)) + lowercase.slice(1) } /// Convert a string into title case #let titlecase(s) = { assert(type(s) == str, message: "s must be a string") if s.len() == 0 { return s } s.split(" ").map(capitalize).join(" ") } /// Convert a heading with label to short form /// /// - `it` is the heading #let short-heading(self: none, it) = { if it == none { return } let convert-label-to-short-heading = if ( type(self) == dictionary and "methods" in self and "convert-label-to-short-heading" in self.methods ) { self.methods.convert-label-to-short-heading } else { (self: none, lbl) => titlecase(lbl.replace(regex("^[^:]*:"), "").replace("_", " ").replace("-", " ")) } convert-label-to-short-heading = convert-label-to-short-heading.with(self: self) assert(type(it) == content and it.func() == heading, message: "it must be a heading") if not it.has("label") { return it.body } let lbl = str(it.label) return convert-label-to-short-heading(lbl) } /// Get the current heading On or before the current page. /// /// - `level` is the level of the heading. If `level` is `auto`, it will return the last heading on or before the current page. If `level` is a number, it will return the last heading on or before the current page with the same level. /// /// - `hierachical` is a boolean value to indicate whether to return the heading hierachically. If `hierachical` is `true`, it will return the last heading according to the hierachical structure. If `hierachical` is `false`, it will return the last heading on or before the current page with the same level. /// /// - `depth` is the maximum depth of the heading to search. Usually, it should be set as slide-level. #let current-heading(level: auto, hierachical: true, depth: 9999) = { let current-page = here().page() if not hierachical and level != auto { let headings = query(heading).filter(h => ( h.location().page() <= current-page and h.level <= depth and h.level == level )) return headings.at(-1, default: none) } let headings = query(heading).filter(h => h.location().page() <= current-page and h.level <= depth) if headings == () { return } if level == auto { return headings.last() } let current-level = headings.last().level let current-heading = headings.pop() while headings.len() > 0 and level < current-level { current-level = headings.last().level current-heading = headings.pop() } if level == current-level { return current-heading } } /// Display the current heading on the page. /// /// - `level` is the level of the heading. If `level` is `auto`, it will return the last heading on or before the current page. If `level` is a number, it will return the last heading on or before the current page with the same level. /// /// - `numbered` is a boolean value to indicate whether to display the numbering of the heading. Default is `true`. /// /// - `hierachical` is a boolean value to indicate whether to return the heading hierachically. 
If `hierachical` is `true`, it will return the last heading according to the hierachical structure. If `hierachical` is `false`, it will return the last heading on or before the current page with the same level. /// /// - `depth` is the maximum depth of the heading to search. Usually, it should be set as slide-level. /// /// - `setting` is the setting of the heading. Default is `body => body`. /// /// - `sty` is the style of the heading. If `sty` is a function, it will use the function to style the heading. For example, `sty: current-heading => current-heading.body`. #let display-current-heading( self: none, level: auto, numbered: true, hierachical: true, depth: 9999, setting: body => body, ..sty, ) = ( context { let sty = if sty.pos().len() > 1 { sty.pos().at(0) } else { current-heading => { if numbered and current-heading.numbering != none { _typst-builtin-numbering( current-heading.numbering, ..counter(heading).at(current-heading.location()), ) + h(.3em) } current-heading.body } } let current-heading = current-heading(level: level, hierachical: hierachical, depth: depth) if current-heading != none { setting(sty(current-heading)) } } ) /// Display the current heading number on the page. /// /// - `level` is the level of the heading. If `level` is `auto`, it will return the last heading on or before the current page. If `level` is a number, it will return the last heading on or before the current page with the same level. /// /// - `numbering` is the numbering of the heading. If `numbering` is `auto`, it will use the numbering of the heading. If `numbering` is a string, it will use the string as the numbering. /// /// - `hierachical` is a boolean value to indicate whether to return the heading hierachically. If `hierachical` is `true`, it will return the last heading according to the hierachical structure. If `hierachical` is `false`, it will return the last heading on or before the current page with the same level. /// /// - `depth` is the maximum depth of the heading to search. Usually, it should be set as slide-level. #let display-current-heading-number(level: auto, numbering: auto, hierachical: true, depth: 9999) = ( context { let current-heading = current-heading(level: level, hierachical: hierachical, depth: depth) if current-heading != none and numbering == auto and current-heading.numbering != none { _typst-builtin-numbering(current-heading.numbering, ..counter(heading).at(current-heading.location())) } else if current-heading != none and numbering != auto { _typst-builtin-numbering(numbering, ..counter(heading).at(current-heading.location())) } } ) /// Display the current short heading on the page. /// /// - `level` is the level of the heading. If `level` is `auto`, it will return the last heading on or before the current page. If `level` is a number, it will return the last heading on or before the current page with the same level. /// /// - `hierachical` is a boolean value to indicate whether to return the heading hierachically. If `hierachical` is `true`, it will return the last heading according to the hierachical structure. If `hierachical` is `false`, it will return the last heading on or before the current page with the same level. /// /// - `depth` is the maximum depth of the heading to search. Usually, it should be set as slide-level. /// /// - `sty` is the style of the heading. If `sty` is a function, it will use the function to style the heading. For example, `sty: current-heading => current-heading.body`. 
#let display-current-short-heading( self: none, level: auto, hierachical: true, depth: 9999, setting: body => body, ..sty, ) = ( context { let sty = if sty.pos().len() > 1 { sty.pos().at(0) } else { current-heading => { short-heading(self: self, current-heading) } } let current-heading = current-heading(level: level, hierachical: hierachical, depth: depth) if current-heading != none { setting(sty(current-heading)) } } ) /// Display the date of `self.info.date` with `self.datetime-format` format. #let display-info-date(self) = { assert("info" in self, message: "self must have an info field") if type(self.info.date) == datetime { self.info.date.display(self.at("datetime-format", default: auto)) } else { self.info.date } } /// Convert content to markup text, partly from /// [typst-examples-book](https://sitandr.github.io/typst-examples-book/book/typstonomicon/extract_markup_text.html). /// /// - `it` is the content to convert. /// /// - `mode` is the mode of the markup text, either `typ` or `md`. /// /// - `indent` is the number of spaces to indent, default is `0`. #let markup-text(it, mode: "typ", indent: 0) = { assert(mode == "typ" or mode == "md", message: "mode must be 'typ' or 'md'") let indent-markup-text = markup-text.with(mode: mode, indent: indent + 2) let markup-text = markup-text.with(mode: mode, indent: indent) if type(it) == str { it } else if type(it) == content { if it.func() == raw { if it.block { "\n" + indent * " " + "```" + it.lang + it .text .split("\n") .map(l => "\n" + indent * " " + l) .sum(default: "") + "\n" + indent * " " + "```" } else { "`" + it.text + "`" } } else if it == [ ] { " " } else if it.func() == enum.item { "\n" + indent * " " + "+ " + indent-markup-text(it.body) } else if it.func() == list.item { "\n" + indent * " " + "- " + indent-markup-text(it.body) } else if it.func() == terms.item { "\n" + indent * " " + "/ " + markup-text(it.term) + ": " + indent-markup-text(it.description) } else if it.func() == linebreak { "\n" + indent * " " } else if it.func() == parbreak { "\n\n" + indent * " " } else if it.func() == strong { if mode == "md" { "**" + markup-text(it.body) + "**" } else { "*" + markup-text(it.body) + "*" } } else if it.func() == emph { if mode == "md" { "*" + markup-text(it.body) + "*" } else { "_" + markup-text(it.body) + "_" } } else if it.func() == link and type(it.dest) == str { if mode == "md" { "[" + markup-text(it.body) + "](" + it.dest + ")" } else { "#link(\"" + it.dest + "\")[" + markup-text(it.body) + "]" } } else if it.func() == heading { if mode == "md" { it.depth * "#" + " " + markup-text(it.body) + "\n" } else { it.depth * "=" + " " + markup-text(it.body) + "\n" } } else if it.has("children") { it.children.map(markup-text).join() } else if it.has("body") { markup-text(it.body) } else if it.has("text") { if type(it.text) == str { it.text } else { markup-text(it.text) } } else if it.func() == smartquote { if it.double { "\"" } else { "'" } } else { "" } } else { repr(it) } } // Code: HEIGHT/WIDTH FITTING and cover-with-rect // Attribution: This file is based on the code from https://github.com/andreasKroepelin/polylux/pull/91 // Author: ntjess #let _size-to-pt(size, container-dimension) = { let to-convert = size if type(size) == ratio { to-convert = container-dimension * size } measure(v(to-convert)).height } #let _limit-content-width(width: none, body, container-size) = { let mutable-width = width if width == none { mutable-width = calc.min(container-size.width, measure(body).width) } else { mutable-width = _size-to-pt(width, 
container-size.width) } box(width: mutable-width, body) } /// Fit content to specified height. /// /// Example: `#utils.fit-to-height(1fr)[BIG]` /// /// - `width` will determine the width of the content after scaling. So, if you want the scaled content to fill half of the slide width, you can use width: 50%. /// /// - `prescale-width` allows you to make typst's layout assume that the given content is to be laid out in a container of a certain width before scaling. For example, you can use `prescale-width: 200%` assuming the slide's width is twice the original. /// /// - `grow` is a boolean indicating whether the content should be scaled up if it is smaller than the available height. Default is `true`. /// /// - `shrink` is a boolean indicating whether the content should be scaled down if it is larger than the available height. Default is `true`. /// /// - `height` is the height to fit the content to. /// /// - `body` is the content to fit. #let fit-to-height( width: none, prescale-width: none, grow: true, shrink: true, height, body, ) = { // Place two labels with the requested vertical separation to be able to // measure their vertical distance in pt. // Using this approach instead of using `measure` allows us to accept fractions // like `1fr` as well. // The label must be attached to content, so we use a show rule that doesn't // display anything as the anchor. let before-label = label("touying-fit-height-before") let after-label = label("touying-fit-height-after") [ #show before-label: none #show after-label: none #v(1em) hidden#before-label #v(height) hidden#after-label ] context { let before = query(selector(before-label).before(here())) let before-pos = before.last().location().position() let after = query(selector(after-label).before(here())) let after-pos = after.last().location().position() let available-height = after-pos.y - before-pos.y layout(container-size => { // Helper function to more easily grab absolute units let get-pts(body, w-or-h) = { let dim = if w-or-h == "w" { container-size.width } else { container-size.height } _size-to-pt(body, dim) } // Provide a sensible initial width, which will define initial scale parameters. // Note this is different from the post-scale width, which is a limiting factor // on the allowable scaling ratio let boxed-content = _limit-content-width( width: prescale-width, body, container-size, ) // post-scaling width let mutable-width = width if width == none { mutable-width = container-size.width } mutable-width = get-pts(mutable-width, "w") let size = measure(boxed-content) if size.height == 0pt or size.width == 0pt { return body } let h-ratio = available-height / size.height let w-ratio = mutable-width / size.width let ratio = calc.min(h-ratio, w-ratio) * 100% if ((shrink and (ratio < 100%)) or (grow and (ratio > 100%))) { let new-width = size.width * ratio v(-available-height) // If not boxed, the content can overflow to the next page even though it will // fit. This is because scale doesn't update the layout information. // Boxing in a container without clipping will inform typst that content // will indeed fit in the remaining space box( width: new-width, height: available-height, scale(x: ratio, y: ratio, origin: top + left, boxed-content), ) } else { body } }) } } /// Fit content to specified width. /// /// Example: `#utils.fit-to-width(1fr)[BIG]` /// /// - `grow` is a boolean indicating whether the content should be scaled up if it is smaller than the available width. Default is `true`. 
///
/// - `shrink` is a boolean indicating whether the content should be scaled down if it is larger than the available width. Default is `true`.
///
/// - `width` is the width to fit the content to.
///
/// - `content` is the content to fit.
#let fit-to-width(grow: true, shrink: true, width, content) = {
  layout(layout-size => {
    let content-size = measure(content)
    let content-width = content-size.width
    let width = _size-to-pt(width, layout-size.width)
    if (content-width != 0pt and ((shrink and (width < content-width)) or (grow and (width > content-width)))) {
      let ratio = width / content-width * 100%
      // The first box keeps content from prematurely wrapping
      let scaled = scale(
        box(content, width: content-width),
        origin: top + left,
        x: ratio,
        y: ratio,
      )
      // The second box lets typst know the post-scaled dimensions, since `scale`
      // doesn't update layout information
      box(scaled, width: width, height: content-size.height * ratio)
    } else {
      content
    }
  })
}

/// Cover content with a rectangle of a specified color. If you set the fill to the background color of the page, you can use this to create a semi-transparent overlay.
///
/// Example: `#utils.cover-with-rect(fill: "red")[Hidden]`
///
/// - `cover-args` are the arguments to pass to the rectangle.
///
/// - `fill` is the color to fill the rectangle with.
///
/// - `inline` is a boolean indicating whether the content should be displayed inline. Default is `true`.
///
/// - `body` is the content to cover.
#let cover-with-rect(..cover-args, fill: auto, inline: true, body) = {
  if fill == auto {
    panic("`auto` fill value is not supported until typst provides utilities to" + " retrieve the current page background")
  }
  if type(fill) == str {
    fill = rgb(fill)
  }
  let to-display = layout(layout-size => {
    context {
      let body-size = measure(body)
      let bounding-width = calc.min(body-size.width, layout-size.width)
      let wrapped-body-size = measure(box(body, width: bounding-width))
      let named = cover-args.named()
      if "width" not in named {
        named.insert("width", wrapped-body-size.width)
      }
      if "height" not in named {
        named.insert("height", wrapped-body-size.height)
      }
      if "outset" not in named {
        // This outset covers the tops of tall letters and the bottoms of letters with
        // descenders. Alternatively, we could use
        // `set text(top-edge: "bounds", bottom-edge: "bounds")` to get the same effect,
        // but this changes text alignment and also misaligns bullets in enums/lists.
        // In contrast, `outset` preserves spacing and alignment at the cost of adding
        // a slight, visible border when the covered object is right next to the edge
        // of a color change.
        named.insert("outset", (top: 0.15em, bottom: 0.25em))
      }
      stack(
        spacing: -wrapped-body-size.height,
        body,
        rect(fill: fill, ..named, ..cover-args.pos()),
      )
    }
  })
  if inline {
    box(to-display)
  } else {
    to-display
  }
}

/// Update the alpha channel of a color.
///
/// Example: `update-alpha(rgb("#ff0000"), 0.5)` returns `rgb(255, 0, 0, 0.5)`
///
/// - `constructor` is the color constructor to use. Default is `rgb`.
///
/// - `color` is the color to update.
///
/// - `alpha` is the new alpha value.
#let update-alpha(constructor: rgb, color, alpha) = constructor(..color.components(alpha: true).slice(0, -1), alpha)

/// Cover content with a transparent rectangle.
///
/// Example: `config-methods(cover: utils.semi-transparent-cover)`
#let semi-transparent-cover(self: none, constructor: rgb, alpha: 85%, body) = {
  cover-with-rect(
    fill: update-alpha(
      constructor: constructor,
      self.page.fill,
      alpha,
    ),
    body,
  )
}

/// Alert content with a primary color.
/// /// Example: `config-methods(alert: utils.alert-with-primary-color)` #let alert-with-primary-color(self: none, body) = text(fill: self.colors.primary, body) /// Alert content. #let alert(self: none, body) = (self.methods.alert)(self: self, body) // Code: check visible subslides and dynamic control // Attribution: This file is based on the code from https://github.com/andreasKroepelin/polylux/blob/main/logic.typ // Author: <NAME> #let _parse-subslide-indices(s) = { let parts = s.split(",").map(p => p.trim()) let parse-part(part) = { let match-until = part.match(regex("^-([[:digit:]]+)$")) let match-beginning = part.match(regex("^([[:digit:]]+)-$")) let match-range = part.match(regex("^([[:digit:]]+)-([[:digit:]]+)$")) let match-single = part.match(regex("^([[:digit:]]+)$")) if match-until != none { let parsed = int(match-until.captures.first()) // assert(parsed > 0, "parsed idx is non-positive") (until: parsed) } else if match-beginning != none { let parsed = int(match-beginning.captures.first()) // assert(parsed > 0, "parsed idx is non-positive") (beginning: parsed) } else if match-range != none { let parsed-first = int(match-range.captures.first()) let parsed-last = int(match-range.captures.last()) // assert(parsed-first > 0, "parsed idx is non-positive") // assert(parsed-last > 0, "parsed idx is non-positive") (beginning: parsed-first, until: parsed-last) } else if match-single != none { let parsed = int(match-single.captures.first()) // assert(parsed > 0, "parsed idx is non-positive") parsed } else { panic("failed to parse visible slide idx:" + part) } } parts.map(parse-part) } /// Check if a slide is visible /// /// Example: `check-visible(3, "2-")` returns `true` /// /// - `idx` is the index of the slide /// /// - `visible-subslides` is a single integer, an array of integers, /// or a string that specifies the visible subslides /// /// Read [polylux book](https://polylux.dev/book/dynamic/complex.html) /// /// The simplest extension is to use an array, such as `(1, 2, 4)` indicating that /// slides 1, 2, and 4 are visible. This is equivalent to the string `"1, 2, 4"`. /// /// You can also use more convenient and complex strings to specify visible slides. /// /// For example, "-2, 4, 6-8, 10-" means slides 1, 2, 4, 6, 7, 8, 10, and slides after 10 are visible. 
#let check-visible(idx, visible-subslides) = { if type(visible-subslides) == int { idx == visible-subslides } else if type(visible-subslides) == array { visible-subslides.any(s => check-visible(idx, s)) } else if type(visible-subslides) == str { let parts = _parse-subslide-indices(visible-subslides) check-visible(idx, parts) } else if type(visible-subslides) == content and visible-subslides.has("text") { let parts = _parse-subslide-indices(visible-subslides.text) check-visible(idx, parts) } else if type(visible-subslides) == dictionary { let lower-okay = if "beginning" in visible-subslides { visible-subslides.beginning <= idx } else { true } let upper-okay = if "until" in visible-subslides { visible-subslides.until >= idx } else { true } lower-okay and upper-okay } else { panic("you may only provide a single integer, an array of integers, or a string") } } #let last-required-subslide(visible-subslides) = { if type(visible-subslides) == int { visible-subslides } else if type(visible-subslides) == array { calc.max(..visible-subslides.map(s => last-required-subslide(s))) } else if type(visible-subslides) == str { let parts = _parse-subslide-indices(visible-subslides) last-required-subslide(parts) } else if type(visible-subslides) == dictionary { let last = 0 if "beginning" in visible-subslides { last = calc.max(last, visible-subslides.beginning) } if "until" in visible-subslides { last = calc.max(last, visible-subslides.until) } last } else { panic("you may only provide a single integer, an array of integers, or a string") } } /// Uncover content in some subslides. Reserved space when hidden (like `#hide()`). /// /// Example: `uncover("2-")[abc]` will display `[abc]` if the current slide is 2 or later /// /// - `visible-subslides` is a single integer, an array of integers, /// or a string that specifies the visible subslides /// /// Read [polylux book](https://polylux.dev/book/dynamic/complex.html) /// /// The simplest extension is to use an array, such as `(1, 2, 4)` indicating that /// slides 1, 2, and 4 are visible. This is equivalent to the string `"1, 2, 4"`. /// /// You can also use more convenient and complex strings to specify visible slides. /// /// For example, "-2, 4, 6-8, 10-" means slides 1, 2, 4, 6, 7, 8, 10, and slides after 10 are visible. /// /// - `uncover-cont` is the content to display when the content is visible in the subslide. #let uncover(self: none, visible-subslides, uncover-cont) = { let cover = self.methods.cover.with(self: self) if check-visible(self.subslide, visible-subslides) { uncover-cont } else { cover(uncover-cont) } } /// Display content in some subslides only. /// Don't reserve space when hidden, content is completely not existing there. /// /// - `visible-subslides` is a single integer, an array of integers, /// or a string that specifies the visible subslides /// /// Read [polylux book](https://polylux.dev/book/dynamic/complex.html) /// /// The simplest extension is to use an array, such as `(1, 2, 4)` indicating that /// slides 1, 2, and 4 are visible. This is equivalent to the string `"1, 2, 4"`. /// /// You can also use more convenient and complex strings to specify visible slides. /// /// For example, "-2, 4, 6-8, 10-" means slides 1, 2, 4, 6, 7, 8, 10, and slides after 10 are visible. /// /// - `only-cont` is the content to display when the content is visible in the subslide. 
#let only(self: none, visible-subslides, only-cont) = { if check-visible(self.subslide, visible-subslides) { only-cont } } /// `#alternatives` has a couple of "cousins" that might be more convenient in some situations. The first one is `#alternatives-match` that has a name inspired by match-statements in many functional programming languages. The idea is that you give it a dictionary mapping from subslides to content: /// /// #example(``` /// #alternatives-match(( /// "1, 3-5": [this text has the majority], /// "2, 6": [this is shown less often] /// )) /// ```) /// /// - `subslides-contents` is a dictionary mapping from subslides to content. /// /// - `position` is the position of the content. Default is `bottom + left`. #let alternatives-match(self: none, subslides-contents, position: bottom + left) = { let subslides-contents = if type(subslides-contents) == dictionary { subslides-contents.pairs() } else { subslides-contents } let subslides = subslides-contents.map(it => it.first()) let contents = subslides-contents.map(it => it.last()) context { let sizes = contents.map(c => measure(c)) let max-width = calc.max(..sizes.map(sz => sz.width)) let max-height = calc.max(..sizes.map(sz => sz.height)) for (subslides, content) in subslides-contents { only( self: self, subslides, box( width: max-width, height: max-height, align(position, content), ), ) } } } /// `#alternatives` is able to show contents sequentially in subslides. /// /// Example: `#alternatives[Ann][Bob][Christopher]` will show "Ann" in the first subslide, "Bob" in the second subslide, and "Christopher" in the third subslide. /// /// - `start` is the starting subslide number. Default is `1`. /// /// - `repeat-last` is a boolean indicating whether the last subslide should be repeated. Default is `true`. #let alternatives( self: none, start: 1, repeat-last: true, ..args, ) = { let contents = args.pos() let kwargs = args.named() let subslides = range(start, start + contents.len()) if repeat-last { subslides.last() = (beginning: subslides.last()) } alternatives-match(self: self, subslides.zip(contents), ..kwargs) } /// You can have very fine-grained control over the content depending on the current subslide by using #alternatives-fn. It accepts a function (hence the name) that maps the current subslide index to some content. /// /// Example: `#alternatives-fn(start: 2, count: 7, subslide => { numbering("(i)", subslide) })` /// /// - `start` is the starting subslide number. Default is `1`. /// /// - `end` is the ending subslide number. Default is `none`. /// /// - `count` is the number of subslides. Default is `none`. #let alternatives-fn( self: none, start: 1, end: none, count: none, ..kwargs, fn, ) = { let end = if end == none { if count == none { panic("You must specify either end or count.") } else { start + count } } else { end } let subslides = range(start, end) let contents = subslides.map(fn) alternatives-match(self: self, subslides.zip(contents), ..kwargs.named()) } /// You can use this function if you want to have one piece of content that changes only slightly depending of what "case" of subslides you are in. /// /// #example(``` /// #alternatives-cases(("1, 3", "2"), case => [ /// #set text(fill: teal) if case == 1 /// Some text /// ]) /// ```) /// /// - `cases` is an array of strings that specify the subslides for each case. /// /// - `fn` is a function that maps the case to content. The argument `case` is the index of the cases array you input. 
#let alternatives-cases(self: none, cases, fn, ..kwargs) = { let idcs = range(cases.len()) let contents = idcs.map(fn) alternatives-match(self: self, cases.zip(contents), ..kwargs.named()) } /// Speaker notes are a way to add additional information to your slides that is not visible to the audience. This can be useful for providing additional context or reminders to yourself. /// /// Example: `#speaker-note[This is a speaker note]` /// /// - `self` is the current context. /// /// - `mode` is the mode of the markup text, either `typ` or `md`. Default is `typ`. /// /// - `setting` is a function that takes the note as input and returns a processed note. /// /// - `note` is the content of the speaker note. #let speaker-note(self: none, mode: "typ", setting: it => it, note) = { if self.at("enable-pdfpc", default: true) { let raw-text = if type(note) == content and note.has("text") { note.text } else { markup-text(note, mode: mode).trim() } pdfpc.speaker-note(raw-text) } let show-notes-on-second-screen = self.at("show-notes-on-second-screen", default: none) assert( show-notes-on-second-screen in (none, bottom, right), message: "`show-notes-on-second-screen` should be `none`, `bottom` or `right`", ) if show-notes-on-second-screen != none { slide-note-state.update(setting(note)) } } /// i18n Outline Title #let i18n-outline-title = context { let mapping = ( ar: "المحتويات", ca: "Índex", cs: "Obsah", da: "Indhold", en: "Outline", es: "Índice", et: "Sisukord", fi: "Sisällys", ja: "目次", ru: "Содержание", zh-TW: "目錄", zh: "目录", ) mapping.at(text.lang, default: mapping.en) }
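
// A minimal usage sketch of the dynamic-control utilities above, written as a
// comment so it does not affect the module. It assumes touying 0.5.x with the
// `university` theme (any theme works); inside `#slide(repeat: ..)`, the
// methods are bound to `self` via `utils.methods`:
//
//   #import "@preview/touying:0.5.2": *
//   #import themes.university: *
//
//   #show: university-theme.with(config-info(title: [Demo]))
//
//   == Dynamic content
//
//   #slide(repeat: 3, self => [
//     #let (uncover, only, alternatives) = utils.methods(self)
//     Always visible.
//     #uncover("2-")[Shown from subslide 2 on; space is reserved before that.]
//     #only("3")[Present only on subslide 3; no space is reserved elsewhere.]
//     #alternatives[First][Second][Third]
//   ])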
https://github.com/DeveloperPaul123/modern-cv
https://raw.githubusercontent.com/DeveloperPaul123/modern-cv/main/tests/utilities/test.typ
typst
Other
#import "@local/modern-cv:0.7.0": * // setup the document like we do for the resume #let font = ("Source Sans Pro", "Source Sans 3") #set text( font: font, size: 11pt, fill: color-darkgray, fallback: true, ) #set page( paper: "a4", margin: (left: 15mm, right: 15mm, top: 10mm, bottom: 10mm), footer: [], footer-descent: 0pt, ) // set paragraph spacing #set par(spacing: 0.75em, justify: true) #set heading( numbering: none, outlined: false, ) #show heading.where(level: 1): it => [ #set block( above: 1em, below: 1em, ) #set text( size: 16pt, weight: "regular", ) #align(left)[ #let color = if colored-headers { accent-color } else { color-darkgray } #text[#strong[#text(color)[#it.body.text]]] #box(width: 1fr, line(length: 100%)) ] ] #show heading.where(level: 2): it => { set text( color-darkgray, size: 12pt, style: "normal", weight: "bold", ) it.body } #show heading.where(level: 3): it => { set text( size: 10pt, weight: "regular", ) smallcaps[#it.body] } #justified-header("Modern CV", "A modern curriculum vitae template") #secondary-justified-header("Created by", "DeveloperPaul123") #github-link("DeveloperPaul123/modern-cv") #linkedin-icon #github-icon #twitter-icon #google-scholar-icon #orcid-icon #phone-icon #email-icon #birth-icon #homepage-icon #website-icon #square(size: 1em, fill: color-darkgray) #square(size: 1em, fill: color-darknight) #square(size: 1em, fill: color-gray) #square(size: 1em, fill: default-accent-color) #square(size: 1em, fill: default-location-color)
https://github.com/frectonz/the-pg-book
https://raw.githubusercontent.com/frectonz/the-pg-book/main/book/043.%20submarine.html.typ
typst
submarine.html

The Submarine

April 2005

"Suits make a corporate comeback," says the New York Times. Why does this sound familiar? Maybe because the suit was also back in February, September 2004, June 2004, March 2004, September 2003, November 2002, April 2002, and February 2002.

Why do the media keep running stories saying suits are back? Because PR firms tell them to. One of the most surprising things I discovered during my brief business career was the existence of the PR industry, lurking like a huge, quiet submarine beneath the news. Of the stories you read in traditional media that aren't about politics, crimes, or disasters, more than half probably come from PR firms.

I know because I spent years hunting such "press hits." Our startup spent its entire marketing budget on PR: at a time when we were assembling our own computers to save money, we were paying a PR firm $16,000 a month. And they were worth it. PR is the news equivalent of search engine optimization; instead of buying ads, which readers ignore, you get yourself inserted directly into the stories. [1]

Our PR firm was one of the best in the business. In 18 months, they got press hits in over 60 different publications. And we weren't the only ones they did great things for. In 1997 I got a call from another startup founder considering hiring them to promote his company. I told him they were PR gods, worth every penny of their outrageous fees. But I remember thinking his company's name was odd. Why call an auction site "eBay"?

Symbiosis

PR is not dishonest. Not quite. In fact, the reason the best PR firms are so effective is precisely that they aren't dishonest. They give reporters genuinely valuable information. A good PR firm won't bug reporters just because the client tells them to; they've worked hard to build their credibility with reporters, and they don't want to destroy it by feeding them mere propaganda.

If anyone is dishonest, it's the reporters. The main reason PR firms exist is that reporters are lazy. Or, to put it more nicely, overworked. Really they ought to be out there digging up stories for themselves. But it's so tempting to sit in their offices and let PR firms bring the stories to them. After all, they know good PR firms won't lie to them.

A good flatterer doesn't lie, but tells his victim selective truths (what a nice color your eyes are). Good PR firms use the same strategy: they give reporters stories that are true, but whose truth favors their clients.

For example, our PR firm often pitched stories about how the Web let small merchants compete with big ones. This was perfectly true. But the reason reporters ended up writing stories about this particular truth, rather than some other one, was that small merchants were our target market, and we were paying the piper.

Different publications vary greatly in their reliance on PR firms. At the bottom of the heap are the trade press, who make most of their money from advertising and would give the magazines away for free if advertisers would let them. [2] The average trade publication is a bunch of ads, glued together by just enough articles to make it look like a magazine. They're so desperate for "content" that some will print your press releases almost verbatim, if you take the trouble to write them to read like articles.

At the other extreme are publications like the New York Times and the Wall Street Journal. Their reporters do go out and find their own stories, at least some of the time. They'll listen to PR firms, but briefly and skeptically.
We managed to get press hits in almost every publication we wanted, but we never managed to crack the print edition of the Times. [3]

The weak point of the top reporters is not laziness, but vanity. You don't pitch stories to them. You have to approach them as if you were a specimen under their all-seeing microscope, and make it seem as if the story you want them to run is something they thought of themselves.

Our greatest PR coup was a two-part one. We estimated, based on some fairly informal math, that there were about 5000 stores on the Web. We got one paper to print this number, which seemed neutral enough. But once this "fact" was out there in print, we could quote it to other publications, and claim that with 1000 users we had 20% of the online store market.

This was roughly true. We really did have the biggest share of the online store market, and 5000 was our best guess at its size. But the way the story appeared in the press sounded a lot more definite.

Reporters like definitive statements. For example, many of the stories about <NAME>'s conviction say that he was one of the 10 worst spammers. This "fact" originated in Spamhaus's ROKSO list, which I think even Spamhaus would admit is a rough guess at the top spammers. The first stories about Jaynes cited this source, but now it's simply repeated as if it were part of the indictment. [4]

All you can say with certainty about Jaynes is that he was a fairly big spammer. But reporters don't want to print vague stuff like "fairly big." They want statements with punch, like "top ten." And PR firms give them what they want. Wearing suits, we're told, will make us 3.6 percent more productive.

Buzz

Where the work of PR firms really does get deliberately misleading is in the generation of "buzz." They usually feed the same story to several different publications at once. And when readers see similar stories in multiple places, they think there is some important trend afoot. Which is exactly what they're supposed to think.

When Windows 95 was launched, people waited outside stores at midnight to buy the first copies. None of them would have been there without PR firms, who generated such a buzz in the news media that it became self-reinforcing, like a nuclear chain reaction.

I doubt PR firms realize it yet, but the Web makes it possible to track them at work. If you search for the obvious phrases, you turn up several efforts over the years to place stories about the return of the suit. For example, the Reuters article that got picked up by USA Today in September 2004. "The suit is back," it begins.

Trend articles like this are almost always the work of PR firms. Once you know how to read them, it's straightforward to figure out who the client is. With trend stories, PR firms usually line up one or more "experts" to talk about the industry generally. In this case we get three: the NPD Group, the creative director of GQ, and a research director at <NAME>. [5] When you get to the end of the experts, look for the client. And bingo, there it is: The Men's Wearhouse.

Not surprising, considering The Men's Wearhouse was at that moment running ads saying "The Suit is Back." Talk about a successful press hit-- a wire service article whose first sentence is your own ad copy.

The secret to finding other press hits from a given pitch is to realize that they all started from the same document back at the PR firm.
Search for a few key phrases and the names of the clients and the experts, and you'll turn up other variants of this story.

Casual fridays are out and dress codes are in writes <NAME> in The Boston Globe. In a remarkable coincidence, Ms. Lewis's industry contacts also include the creative director of GQ.

Ripped jeans and T-shirts are out, writes <NAME> in US News & World Report. And she too knows the creative director of GQ.

Men's suits are back writes <NAME> in Sexbuzz.Com ("the ultimate men's entertainment magazine").

Dressing down loses appeal as men suit up at the office writes Tenisha Mercer of The Detroit News.

Now that so many news articles are online, I suspect you could find a similar pattern for most trend stories placed by PR firms. I propose we call this new sport "PR diving," and I'm sure there are far more striking examples out there than this clump of five stories.

Online

After spending years chasing them, it's now second nature to me to recognize press hits for what they are. But before we hired a PR firm I had no idea where articles in the mainstream media came from. I could tell a lot of them were crap, but I didn't realize why.

Remember the exercises in critical reading you did in school, where you had to look at a piece of writing and step back and ask whether the author was telling the whole truth? If you really want to be a critical reader, it turns out you have to step back one step further, and ask not just whether the author is telling the truth, but why he's writing about this subject at all.

Online, the answer tends to be a lot simpler. Most people who publish online write what they write for the simple reason that they want to. You can't see the fingerprints of PR firms all over the articles, as you can in so many print publications-- which is one of the reasons, though they may not consciously realize it, that readers trust bloggers more than Business Week.

I was talking recently to a friend who works for a big newspaper. He thought the print media were in serious trouble, and that they were still mostly in denial about it. "They think the decline is cyclic," he said. "Actually it's structural."

In other words, the readers are leaving, and they're not coming back. Why? I think the main reason is that the writing online is more honest. Imagine how incongruous the New York Times article about suits would sound if you read it in a blog:

The urge to look corporate-- sleek, commanding, prudent, yet with just a touch of hubris on your well-cut sleeve-- is an unexpected development in a time of business disgrace.

The problem with this article is not just that it originated in a PR firm. The whole tone is bogus. This is the tone of someone writing down to their audience.

Whatever its flaws, the writing you find online is authentic. It's not mystery meat cooked up out of scraps of pitch letters and press releases, and pressed into molds of zippy journalese. It's people writing what they think.

I didn't realize, till there was an alternative, just how artificial most of the writing in the mainstream media was. I'm not saying I used to believe what I read in Time and Newsweek. Since high school, at least, I've thought of magazines like that more as guides to what ordinary people were being told to think than as sources of information. But I didn't realize till the last few years that writing for publication didn't have to mean writing that way.
I didn't realize you could write as candidly and informally as you would if you were writing to a friend.

Readers aren't the only ones who've noticed the change. The PR industry has too. A hilarious article on the site of the PR Society of America gets to the heart of the matter:

Bloggers are sensitive about becoming mouthpieces for other organizations and companies, which is the reason they began blogging in the first place.

PR people fear bloggers for the same reason readers like them. And that means there may be a struggle ahead. As this new kind of writing draws readers away from traditional media, we should be prepared for whatever PR mutates into to compensate. When I think how hard PR firms work to score press hits in the traditional media, I can't imagine they'll work any less hard to feed stories to bloggers, if they can figure out how.

Notes

[1] PR has at least one beneficial feature: it favors small companies. If PR didn't work, the only alternative would be to advertise, and only big companies can afford that.

[2] Advertisers pay less for ads in free publications, because they assume readers ignore something they get for free. This is why so many trade publications nominally have a cover price and yet give away free subscriptions with such abandon.

[3] Different sections of the Times vary so much in their standards that they're practically different papers. Whoever fed the style section reporter this story about suits coming back would have been sent packing by the regular news reporters.

[4] The most striking example I know of this type is the "fact" that the Internet worm of 1988 infected 6000 computers. I was there when it was cooked up, and this was the recipe: someone guessed that there were about 60,000 computers attached to the Internet, and that the worm might have infected ten percent of them.

Actually no one knows how many computers the worm infected, because the remedy was to reboot them, and this destroyed all traces. But people like numbers. And so this one is now replicated all over the Internet, like a little worm of its own.

[5] Not all were necessarily supplied by the PR firm. Reporters sometimes call a few additional sources on their own, like someone adding a few fresh vegetables to a can of soup.

Thanks to <NAME>, <NAME>, <NAME>, Jessica Livingston, <NAME>, <NAME>, and <NAME> (who also found the PRSA article) for reading drafts of this.

Correction: Earlier versions used a recent Business Week article mentioning del.icio.us as an example of a press hit, but Joshua Schachter tells me it was spontaneous.

The Web is a Writing Environment
A Sell-Out's Tale
How to Pitch Bloggers
Blogging for Milk
7 Habits of Highly Effective Blog PR
PR People Need To Learn To Deal With New Gatekeepers
Marqui Blogosphere Program
PR Watch
Real Men Exfoliate
How the News is Made
January 2006: The suit is back yet again
The Decline of the Tie
Japanese Translation

If you liked this, you may also like Hackers & Painters.
https://github.com/Myriad-Dreamin/tinymist
https://raw.githubusercontent.com/Myriad-Dreamin/tinymist/main/crates/tinymist-query/src/fixtures/docs/no_comment.typ
typst
Apache License 2.0
#let x /* some comment */ = 1; #let x /* ident */ = 1;
https://github.com/connachermurphy/typst-cv
https://raw.githubusercontent.com/connachermurphy/typst-cv/main/cv.typ
typst
MIT License
#let heading_size = 16pt
#let heading_font = "Futura"
#let body_font = "Lora"
// #let body_font = "Linux Libertine"

#let cv(
  name: "Name",
  email: "Email",
  date: "",
  body,
) = {
  set document(author: name, title: "CV Title Placeholder")
  set page(paper: "us-letter", numbering: "1", number-align: center)
  set text(font: body_font, lang: "en", size: 12pt)

  show heading.where(level: 1): set text(font: heading_font, style: "normal", weight: "regular", size: heading_size)

  show link: set text(blue)
  show link: underline

  // `align` takes the alignment first, then the content.
  align(center, [
    #text(name, font: heading_font, style: "normal", weight: "regular", size: heading_size) \
    #email \
    Last updated: #date
  ])

  // v(-12pt)
  // line(length: 100%)
  // v(-6pt)

  body
}

#let experience(
  description: "Experience description",
  date: "Date",
  body
) = {
  // A grid with a flexible first column keeps the date flush right.
  grid(
    columns: (1fr, auto),
    [#description],
    align(right, date),
  )
  // Render the entry body below the header row.
  body
}
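
// A minimal usage sketch (values are placeholders; adjust the import path to
// wherever this file lives):
//
//   #import "cv.typ": cv, experience
//
//   #show: cv.with(
//     name: "Ada Lovelace",
//     email: "ada@example.com",
//     date: "January 2024",
//   )
//
//   = Experience
//   #experience(description: [*Analyst*, Analytical Engines Ltd.], date: "1842")[
//     - Wrote an early published algorithm
//   ]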
https://github.com/7sDream/fonts-and-layout-zhCN
https://raw.githubusercontent.com/7sDream/fonts-and-layout-zhCN/master/chapters/07-localisation/lang-spec.typ
typst
Other
#import "/template/template.typ": web-page-template #import "/template/components.typ": note, cross-link, title-ref, cross-ref #import "/lib/glossary.typ": tr #show: web-page-template // ## Language-specific substitutes == 特定语言专属#tr[substitution] // We've already mentioned the Serbian form of the letter be (б), which should appear different to the Russian Cyrillic version. This is one example of a family of *language-specific substitutes*, which we can handle more or less in the same way. 我们#cross-link(<position:serbian-letter-be>, web-path: "/chapters/05-features/lang-script.typ")[此前]提到过,塞尔维亚样式的西里尔字母`be`和俄文中的不太一样。这是按照具体语言来决定是否进行#tr[substitution]的一个需求案例,这类需求基本上都能用下面这种方式处理。 // First, we design our two glyphs, the Russian be (let's call the glyph `be-cy`) and the Serbian variant, which we'll call `be-cy.SRB`. We want a feature which is on by default, occurs early in the process, is pre-shaping (in that it rewrites the input glyph stream) and which substitutes localized forms - this is precisely what the `locl` feature was designed for. We look up the script tag for Cyrillic (`cyrl`) and the language tag for Serbian (`SRB`), and we create a rule that applies only when the input is tagged as being in the Serbian language. We want to do a one-to-one substitution - any Cyrillic be gets swapped out for a Serbian one - so we create a single substitution rule. 首先设计两个#tr[glyph],俄文版的`be`(就叫它`be-cy`)和塞尔维亚语中的变体版本(叫它`be-cy.SRB`)。我们想在一个默认启用的,在处理流程早期,#tr[shaping]阶段前(因为它要重写输入流)生效的,用于#tr[glyph]本地化的特性。这基本上精确锁定了`locl`特性。然后我们查找到,西里尔字母和塞尔维亚语的OpenType标签分别是`cyrl`和`SRB`。接着我们创建一个只在输入文本被标记为塞尔维亚语时才会应用的规则。一个西里尔字母只会变成一个塞尔维亚版的字母,所以我们使用一换一#tr[substitution]: ```fea feature locl { script cyrl; language SRB; sub be-cy by be-cy.SRB; } locl; ``` 这样就行了。 // We can apply the same kind of substitution not just to base characters but also to marks and combining characters, although we need a little thought. In Polish, there's an accent called a *kreska* which looks like an acute accent from other Latin scripts - and in fact, is expressed in Unicode as an acute accent - but is somewhat more upright and positioned to the right of the glyph. How do we create a font which distinguishes between the Western European acute accent and the Polish kreska, even though the Unicode characters are the same? 同样的方式也可以应用在符号和连接#tr[character]上,但需要费些心思。在波兰语中有一种叫做`kreska`的变音符号,它看上去就像拉丁文中的尖音符。事实上,Unicode就是用尖音符来表示它,但它显示出来应该比拉丁文中的更竖直和靠右一些。既然在Unicode中是一个#tr[character],那在制作字体时要如何区分西欧的尖音符和波兰语中的`kreska`呢? // First, we should note that the Polish accent appears on some letters we may not have planned for: c, n, s and z - then again, we should also be aware that these letters also get an *acute* accent in other writing systems: Yoruba, Khmer, and Sanskrit transliteration amongst others. So we can't just rely on having the Polish forms for these. We need - as with the vowels - to create two separate versions: one with the Western European acute, and one with *kreska*. We look at [<NAME>'s web site](http://www.twardoch.com/download/polishhowto/kreska.html) to help get the design of our Polish accents right, and we should now end up with two sets of glyphs: `aacute`, `cacute`, ... and `aacute.PLK`, `cacute.PLK` and so on. 
首先,我们应该注意到波兰尖音符会出现在一些计划之外/*啥意思?*/的字母上,比如`c`、`n`、`s`和`z`上。我们也要意识到,这些字母在其他书写系统中也可能被附加尖音符。比如约鲁巴语、高棉语、梵语转写字母等。我们不能只设计波兰样式,而是需要为这些元音和尖音符的组合都设计两个版本,一个使用西欧尖音符,另一个使用波兰的`kreska`。通过参考<NAME>的网站@<EMAIL>999,我们得以正确设计波兰尖音符。现在我们有了两套#tr[glyph]。一套是 `aacute`、`cacute`……;另一套则是 `aacute.PLK`、`cacute.PLK` 等。 // Now we know what we're doing: we use the `locl` feature as before to substitute in these glyphs when the input text is in Polish: 现在该怎么做就很清晰了。当输入文本是波兰语时,使用`locl`特性将这些#tr[glyph]#tr[substitution]为正确的样式: ```fea feature locl { script latn; language PLK; sub [aacute cacute ...] by [aacute.PLK cacute.PLK ...]; } locl; ``` #note[ // > This general pattern - language-specific substitution rules in the `locl` feature - can be used for a large number of localisation customizations, particularly those based on the Latin script (simply because they tend to be one-to-one glyph replacements.) Further examples of the pattern include Navajo forms of the ogonek accent, and choosing between variants of the letter U+014A LATIN CAPITAL LETTER ENG (Ŋ) - "N-form" variants in the case of Sami languages and "n-form" variants for African ones. 这种在`locl`特性中编写特定语言专用的#tr[substitution]规则的通用模式可以用于很多本地化自定义样式的场景中。这种模式在当地#tr[scripts]是基于拉丁文时尤其好用,因为此时基本上都是简单的一换一替换。这类例子可以在很多语言中找到,比如:反尾形符在纳瓦霍语条件下有特殊样式;`U+014A LATIN CAPITAL LETTER ENG`在萨米语系下使用N形式(Ŋ),而在非洲语言中使用n形式(#text(font: ("DejaVu Sans",))[Ŋ])。 ] // ### A detour about diacritics === 再论#tr[diacritic] // We've looked at the mark-to-base positioning and composition/decomposition substitutions in previous chapters. Why, then, do we need to design separate glyphs for `cacute` and `cacute.PLK` - can't we just design separate *accents* and have the OpenType system tie them together for us? In fact, why do we even need to include a `cacute` in our font *at all*? Can't we just have the font automatically compose the glyph out of the c base glyph and the acute mark, and automatically position it for us? Hey, why can't we do that for *all* our diacritic characters? As with many things in life, the answer is: sure, you *can*, but that doesn't mean you *should*. 在前面的章节中,我们已经探讨了#tr[glyph]的#tr[compose]和#tr[decompose],以及在基本#tr[glyph]上添加符号的#tr[substitution]规则。那么我们为什么需要设计单独的字形来表示 `cacute` 和 `cacute.PLK` 呢?我们不能只设计单独的音调符号,然后让OpenType帮我们把它们结合起来吗?事实上,我们究竟为什么要在字体里制作单独的 `cacute` #tr[glyph]呢?我们不能让字体自动将基本的`c`#tr[glyph]和尖音符组合起来,并自动进行定位吗?嘿,我们甚至可以对所有#tr[diacritic]都使用这种方式啊。答案是:当然,这种方案是*可行*的,但这并不意味着你就*应该*这么做。 // There are a few reasons why it's best to design and include precomposed forms of all the accented glyphs you're going to support, rather than rely on automatic composition. For one thing, there's kerning: it's much easier to test and edit the kerning for "Tå" in your font editor than adding tricky kern triplets in OpenType feature code. 最好还是将所有附加了音调的#tr[glyph]都按预先#tr[compose]好的方式进行整体设计,而不是依赖使用规则来进行自动#tr[compose]。这样建议有几个原因,其中之一是能更方便地处理#tr[kern]。在字体编辑软件中直接测试和编辑诸如`Tå`这样的#tr[kern]是非常简单的,而如果使用自动#tr[compose],你就需要非常小心地编写调整三个#tr[glyph]间距的OpenType特性代码。 // Another problem is that some software (notably Adobe InDesign) doesn't support it, and other software doesn't support it in reliable ways. This is an important area to understand because it highlights the interplay between *OpenType*'s understanding of characters and glyphs and *Unicode*'s understanding of characters and glyphs. 
另一个问题是某些软件(尤其是Adobe InDesign)不支持这个功能,而在另一些软件中的支持也并不稳定。理解这个问题产生的原因非常重要,因为它告诉我们,OpenType对于#tr[character]和#tr[glyph]之间关系的理解和Unicode的理解并不一致。 // Remember how we talked about [Unicode normalization and decomposition](unicode.html#normalization-and-decomposition) in chapter 3, and how you can decompose a character like é (U+00E9 LATIN SMALL LETTER E WITH ACUTE) into two characters, U+0065 LATIN SMALL LETTER E and U+0301 COMBINING ACUTE ACCENT? That sounds very similar to the idea of having an "eacute" glyph which is made up of an "e" glyph and an "acutecomb" glyph. Similar... but unfortunately different. 我们在关于Unicode的章节中的#cross-ref(<heading:normalization-decomposition>, web-path: "/chapters/03-unicode/norm-decomp.typ", web-content: [一个小节]) #title-ref(<heading:normalization-decomposition>, web-path: "/chapters/03-unicode/norm-decomp.typ", web-content: [#tr[normalization]和#tr[decompose]])里介绍过,你可以把#tr[character]é(`U+00E9 LATIN SMALL LETTER E WITH ACUTE`)#tr[decompose]为`U+0065 LATIN SMALL LETTER E` 和 `U+0301 COMBINING ACUTE ACCENT`两个#tr[character]。这听起来和我们想用一个`e`和一个用于#tr[combine]的尖音符组合成`eacute`#tr[glyph]非常类似。嗯,确实很类似,但不幸的是它们并不完全一致。 // As it happens, if your font provides a "e" and a "acutecomb" glyph but *not* a precomposed "eacute", then some text layout systems will *only* render an e-acute if the input text is decomposed to U+0065 U+0301 (which is exceptionally rare) and will use a fallback font to display the precomposed form U+00E9. Others will automatically decompose a U+00E9 in the input stream to U+0065 U+0301 and display the two glyphs correctly. Some systems will correctly substitute a precomposed glyph for its decomposition specified using the `ccmp` feature, but then will fail to position the marks properly in the `mark` feature. 在现实世界中,如果你的字体是把`e`和尖音符分开设计,而没有提供预#tr[compose]的`eacute`的话,那么有些排版系统就只会在输入的文本是 `U+0065 U+0301` 时(这种情况非常罕见)才能正确渲染出#tr[compose]字形。如果它遇到`U+00E9`,就只能使用回退字体来显示了。还有些软件会将用户输入文本中的 `U+00E9` 自动#tr[decompose]为 `U+0065 U+0301`,然后正确显示这两个#tr[glyph]。也有些软件能够调用`ccmap`特性中的规则,将预#tr[compose]#tr[character]#tr[substitution]为它们的#tr[decompose]#tr[glyph],但却无法使用`mark`特性将它们正确定位。 // But having a precomposed glyph in the font will always work, both for composed Unicode characters like U+00E9 *and* for its decompositions, so that's why having the font contain all the glyphs you are likely to support is a better way to go. 但直接在字体里包含一个预#tr[compose]的#tr[glyph]在所有情况下都能正常工作,无论输入的Unicode#tr[character]是`U+00E9`还是其#tr[decompose]形式均可。所以直接在字体里放入所有希望支持的#tr[glyph]是一种更好的方式。
https://github.com/lucifer1004/leetcode.typ
https://raw.githubusercontent.com/lucifer1004/leetcode.typ/main/problems/p0017.typ
typst
#import "../helpers.typ": * #import "../solutions/s0017.typ": * = Letter Combinations of a Phone Number Given a string containing digits from `2-9` inclusive, return all possible letter combinations that the number could represent. Return the answer in *any order*. A mapping of digits to letters (just like on the telephone buttons) is given below. Note that 1 does not map to any letters. #align(center)[#image("../images/p0017.png", width: 300pt)] #let letter-combinations-of-a-phone-number(digits) = { // Solve the problem here } #testcases( letter-combinations-of-a-phone-number, letter-combinations-of-a-phone-number-ref, ( (digits: "23"), (digits: ""), (digits: "2") ) )
https://github.com/freundTech/typst-matryoshka
https://raw.githubusercontent.com/freundTech/typst-matryoshka/main/tests/features/dont-fail/test.typ
typst
MIT License
#import "/lib.typ": compile #set page(fill: gray) #compile("#panic(\"Failed\")", dont-fail: true)
https://github.com/akrantz01/resume
https://raw.githubusercontent.com/akrantz01/resume/main/README.md
markdown
MIT License
# Resume My overengineered [resume] pipeline built using [Typst][typst] and [Cloudflare R2][r2]. Content is pulled from [`data.yml`](data.yml) and displayed based on the layouts in the [`layouts/`](layouts/) directory. Upon a push to the default branch, all the layouts are automatically rendered and uploaded to [R2][r2]. My most up-to-date resume can be found at [resumes.krantz.dev/default.pdf][default] or [krantz.to/resume][shortlink]. [default]: https://resumes.krantz.dev/default.pdf [resume]: https://krantz.to/resume [r2]: https://developers.cloudflare.com/r2/ [shortlink]: https://krantz.to/resume [typst]: https://github.com/typst/typst
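
Each layout is, in essence, a Typst file that loads `data.yml` and renders it. A minimal sketch of the idea (the field names here are illustrative, not the actual schema of `data.yml`):

```typst
#let data = yaml("data.yml")

= #data.name
#data.summary
```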
https://github.com/frectonz/the-pg-book
https://raw.githubusercontent.com/frectonz/the-pg-book/main/book/140.%20word.html.typ
typst
word.html

A Word to the Resourceful

Want to start a startup? Get funded by Y Combinator.

January 2012

A year ago I noticed a pattern in the least successful startups we'd funded: they all seemed hard to talk to. It felt as if there was some kind of wall between us. I could never quite tell if they understood what I was saying.

This caught my attention because earlier we'd noticed a pattern among the most successful startups, and it seemed to hinge on a different quality. We found the startups that did best were the ones with the sort of founders about whom we'd say "they can take care of themselves." The startups that do best are fire-and-forget in the sense that all you have to do is give them a lead, and they'll close it, whatever type of lead it is. When they're raising money, for example, you can do the initial intros knowing that if you wanted to you could stop thinking about it at that point. You won't have to babysit the round to make sure it happens. That type of founder is going to come back with the money; the only question is how much on what terms.

It seemed odd that the outliers at the two ends of the spectrum could be detected by what appeared to be unrelated tests. You'd expect that if the founders at one end were distinguished by the presence of quality x, at the other end they'd be distinguished by lack of x. Was there some kind of inverse relation between resourcefulness and being hard to talk to?

It turns out there is, and the key to the mystery is the old adage "a word to the wise is sufficient." Because this phrase is not only overused, but overused in an indirect way (by prepending the subject to some advice), most people who've heard it don't know what it means. What it means is that if someone is wise, all you have to do is say one word to them, and they'll understand immediately. You don't have to explain in detail; they'll chase down all the implications.

In much the same way that all you have to do is give the right sort of founder a one line intro to a VC, and he'll chase down the money. That's the connection. Understanding all the implications — even the inconvenient implications — of what someone tells you is a subset of resourcefulness. It's conversational resourcefulness.

Like real world resourcefulness, conversational resourcefulness often means doing things you don't want to. Chasing down all the implications of what's said to you can sometimes lead to uncomfortable conclusions. The best word to describe the failure to do so is probably "denial," though that seems a bit too narrow. A better way to describe the situation would be to say that the unsuccessful founders had the sort of conservatism that comes from weakness. They traversed idea space as gingerly as a very old person traverses the physical world. [1]

The unsuccessful founders weren't stupid. Intellectually they were as capable as the successful founders of following all the implications of what one said to them. They just weren't eager to.

So being hard to talk to was not what was killing the unsuccessful startups. It was a sign of an underlying lack of resourcefulness. That's what was killing them. As well as failing to chase down the implications of what was said to them, the unsuccessful founders would also fail to chase down funding, and users, and sources of new ideas.
But the most immediate evidence I had that something was amiss was that I couldn't talk to them.

Notes

[1] A YC partner wrote:

My feeling with the bad groups is that coming into office hours, they've already decided what they're going to do and everything I say is being put through an internal process in their heads, which either desperately tries to munge what I've said into something that conforms with their decision or just outright dismisses it and creates a rationalization for doing so. They may not even be conscious of this process but that's what I think is happening when you say something to bad groups and they have that glazed over look. I don't think it's confusion or lack of understanding per se, it's this internal process at work.

With the good groups, you can tell that everything you say is being looked at with fresh eyes and even if it's dismissed, it's because of some logical reason e.g. "we already tried that" or "from speaking to our users that isn't what they'd like," etc. Those groups never have that glazed over look.

Thanks to <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> for reading drafts of this.
https://github.com/liuxu89/Principles
https://raw.githubusercontent.com/liuxu89/Principles/main/src/chap-1/sec-1.typ
typst
#import "@preview/physica:0.9.2": * #import "/book.typ": book-page #show: book-page.with(title: "Hello, typst") // = The Principle of Superposition = The need for a quantum theory #smallcaps[Classical] Mechanics has been developed continuously from the time of Newton and applied to an ever-widening range of dynamical systems, including the electromagnetic field in interaction with matter. // The underlying ideas and the laws governing their application form a simple and elegant scheme, which one would be inclined to think could not be seriously modified without having all its attractive features spoilt. // Nevertheless it has been found possible to set up a new scheme, called quantum mechanics, which is more suitable for the description of phenomena on the atomic scale and which is in some respects more elegant and satisfying than classical scheme. // This possibility is due to the changes which the new scheme involves being of a very profound character and not clashing with the features of the classical theory that make it so attractive, as a result of which all these features can be incorporated in the new scheme. The necessity for a departure from classical mechanics is clearly shown by experimental results. // In the first place the forces known in classical electrodynamics are inadequate for the explanation of the remarkable stability of atoms and molecules, which is necessary in order that materials may have any definite physical and chemical properties at all. // The introduction of new hypothetical forces will not save the situation, since there exist general principles of classical mechanics, holding for all kinds of forces, leading to results in direct disagreement with observation. // For example, if an atomic system has its equilibrium disturbed in any way and is then left alone, it will be set in oscillation and the oscillations will get impressed on surrounding electromagnetic field, so that their frequencies may be observed with a spectroscope. // Now whatever the laws of force governing the equilibrium, one would expect to be able to include the various frequencies in scheme comprising certain fundamental frequencies and their harmonics. // This is not observed to be the case. // Instead, there is observed a new and unexpected connexion between the frequencies, called Ritz's Combination Law of Spectroscopy, according to which all the frequencies can be expressed as differences between certain terms, the number of terms being much less than the number of frequencies. // This law is quite unintelligible from the classical standpoint. One might try to get over the difficulty without departing from classical mechanics by assuming each of the spectroscopically observed frequencies to be a fundamental frequency with its own degree of freedom, the laws of force being such that the harmonic vibrations do not occur. // Such a theory will not do, however, even apart from the fact that it would give no explanation of Combination Law, since it would immediately bring one into conflict with the experimental evidence on specific heats. // Classical statistical mechanics enables one to establish a general connexion between the total number of degrees of freedom of an assembly of vibrating systems and its specific heat. // If one assumes all the spectroscopic frequencies of an atom to correspond to different degrees of freedom, one would get a specific heat for any kind of matter very much greater than the observed value. 
// In fact the observed specific heats at ordinary temperatures are given fairly well by a theory that takes into account merely the motion of each atom as a whole and assigns no internal motion to it at all.

This leads us to a new clash between classical mechanics and the results of experiment.
// There must certainly be some internal motion in an atom to account for its spectrum, but the internal degrees of freedom, for some classically inexplicable reason, do not contribute to the specific heat.
// A similar clash is found in connexion with the energy of oscillation of the electromagnetic field in a vacuum.
// Classical mechanics requires the specific heat corresponding to this energy to be infinite, but it is observed to be quite finite.
// A general conclusion from experimental results is that oscillations of high frequency do not contribute their classical quota to the specific heat.

As another illustration of the failure of classical mechanics we may consider the behaviour of light.
// We have, on the one hand, the phenomena of interference and diffraction, which can be explained only on the basis of a wave theory; on the other, phenomena such as photo-electric emission and scattering by free electrons, which show that light is composed of small particles.
// These particles, which are called photons, have each a definite energy and momentum, depending on the frequency of light, and appear to have just as real an existence as electrons, or any other particles known in physics.
// A fraction of a photon is never observed.

Experiments have shown that this anomalous behaviour is not peculiar to light, but is quite general.
// All material particles have wave properties, which can be exhibited under suitable conditions.
// We have here a very striking and general example of the breakdown of classical mechanics---not merely an inaccuracy in its laws of motion but #underline[an inadequacy of its concepts to supply us with a description of atomic events.]

The necessity to depart from classical ideas when one wishes to account for the ultimate structure of matter may be seen, not only from experimentally established facts, but also from general philosophical grounds.
// In a classical explanation of the constitution of matter, one would assume it to be made up of a large number of small constituent parts and one would postulate laws for the behaviour of these parts, from which the laws of the matter in bulk could be deduced.
// This would not complete the explanation, however, since the question of the structure and stability of the constituent parts is left untouched.
// To go into this question, it becomes necessary to postulate that each constituent part is itself made up of smaller parts, in terms of which its behaviour is to be explained.
// There is clearly no end to this procedure, so that one can never arrive at the ultimate structure of matter on these lines.
// So long as #underline[big] and #underline[small] are merely relative concepts, it is no help to explain the big in terms of the small.
// It is therefore necessary to modify classical ideas in such a way as to give an absolute meaning to size.

At this stage it becomes important to remember that science is concerned only with observable things and that we can observe an object only by letting it interact with some outside influence.
// An act of observation is thus necessarily accompanied by some disturbance of the object observed.
// We may define an object to be big when the disturbance accompanying our observation of it may be neglected, and small when the disturbance cannot be neglected.
// This definition is in close agreement with the common meanings of big and small.

It is usually assumed that, by being careful, we may cut down the disturbance accompanying our observation to any desired extent.
// The concepts of big and small are then purely relative and refer to the gentleness of our means of observation as well as to the object being described.
// In order to give an absolute meaning to size, such as is required for any theory of the ultimate structure of matter, we have to assume that #underline[there is a limit to the fineness of our powers of observation and the smallness of the accompanying disturbance---a limit which is inherent in the nature of things and can never be surpassed by improved technique or increased skill on the part of the observer.]
// If the object under observation is such that the unavoidable limiting disturbance is negligible, then the object is big in the absolute sense and we may apply classical mechanics to it.
// If, on the other hand, the limiting disturbance is not negligible, then the object is small in the absolute sense and we require a new theory for dealing with it.

A consequence of the preceding discussion is that we must revise our ideas of causality.
// Causality applies only to a system which is left undisturbed.
// If a system is small, we cannot observe it without producing a serious disturbance and hence we cannot expect to find any causal connexion between the results of our observations.
// Causality will still be assumed to apply to undisturbed systems and the equations which will be set up to describe an undisturbed system will be differential equations expressing a causal connexion between conditions at one time and conditions at a later time.
// These equations will be in close correspondence with the equations of classical mechanics, but they will be connected only indirectly with the results of observations.
// There is an unavoidable indeterminacy in the calculation of observational results, the theory enabling us to calculate in general only the probability of our obtaining a particular result when we make an observation.
https://github.com/tfachada/thesist
https://raw.githubusercontent.com/tfachada/thesist/main/src/layout.typ
typst
MIT License
#import "figure-numbering.typ": * #import "utils.typ": * #let thesis( lang: none, cover-image: none, title: none, subtitle: none, author: none, degree: none, supervisor: none, co-supervisor: none, chairperson: none, committee-members: (), date: none, hide-figure-list: none, hide-table-list: none, hide-algorithm-list: none, hide-code-list: none, hide-glossary: none, included-content: (), hide-acknowledgments: false, hide-abstract: false, pic-mode: false, body ) = { /* BASIC DOCUMENT PROPERTIES */ set document(title: title) set page(margin: (2.5cm)) set text(font: "<NAME>", size: 10pt, lang: lang) /* STRINGS BY LANGUAGE */ let STRING_DEGREE = "Thesis to obtain the Master of Science Degree in" let STRING_SUPERVISOR = "Supervisor: " let STRING_SUPERVISORS = "Supervisors: " let STRING_COMMITTEE = "Examination Committee" let STRING_CHAIRPERSON = "Chairperson: " let STRING_MEMBER = "Member of the Committee: " let STRING_MEMBERS = "Members of the Committee: " let STRING_DECLARATION_TITLE = "Declaration" let STRING_DECLARATION_BODY = "I declare that this document is an original work of my own authorship and that it fulfills all the requirements of the Code of Conduct and Good Practices of the Universidade de Lisboa." let STRING_ACKNOWLEDGMENTS = "Acknowledgments" let STRING_OUTLINE = "Contents" let STRING_OUTLINE_FIGURES = "List of Figures" let STRING_OUTLINE_TABLES = "List of Tables" let STRING_OUTLINE_ALGORITHMS = "List of Algorithms" let STRING_OUTLINE_CODE = "Listings" let STRING_ALGORITHM = "Algorithm" let STRING_CODE = "Listing" if lang == "pt" { STRING_DEGREE = "Dissertação para obtenção do Grau de Mestre em" STRING_SUPERVISOR = "Orientador: " STRING_SUPERVISORS = "Orientadores: " STRING_COMMITTEE = "Júri" STRING_CHAIRPERSON = "Presidente: " STRING_MEMBER = "Vogal: " STRING_MEMBERS = "Vogais: " STRING_DECLARATION_TITLE = "Declaração" STRING_DECLARATION_BODY = "Declaro que o presente documento é um trabalho original da minha autoria e que cumpre todos os requisitos do Código de Conduta e Boas Práticas da Universidade de Lisboa." 
STRING_ACKNOWLEDGMENTS = "Agradecimentos" STRING_OUTLINE = "Índice" STRING_OUTLINE_FIGURES = "Lista de Figuras" STRING_OUTLINE_TABLES = "Lista de Tabelas" STRING_OUTLINE_ALGORITHMS = "Lista de Algoritmos" STRING_OUTLINE_CODE = "Lista de trechos de Código" STRING_ALGORITHM = "Algoritmo" STRING_CODE = "Código" } /* TITLE PAGE */ align(center,{ align(left, image("IST.png", width: 30%)) v(1cm) if cover-image != none { cover-image } else { v(2cm) } v(1cm) text(16pt, strong(title)) if subtitle != none{ text(14pt, "\n\n" + subtitle) } v(1cm) text(14pt, strong(author)) v(1cm) if not pic-mode { text(12pt, STRING_DEGREE) } text(16pt, "\n\n" + strong(degree)) v(1cm) if co-supervisor == none { text(12pt, STRING_SUPERVISOR + supervisor) } else { text(12pt, STRING_SUPERVISORS + supervisor) text(12pt, "\n" + co-supervisor) } v(1cm) if not pic-mode { text(14pt, strong(STRING_COMMITTEE)) text(12pt, "\n\n" + STRING_CHAIRPERSON + chairperson) text(12pt, "\n" + STRING_SUPERVISOR + supervisor) if committee-members.at(1) == none { text(12pt, "\n" + STRING_MEMBER) } else{ text(12pt, "\n" + STRING_MEMBERS) } text(12pt, committee-members.at(0)) if committee-members.at(1) != none { text(12pt, "\n" + committee-members.at(1)) if committee-members.at(2) != none { text(12pt, "\n" + committee-members.at(2)) } } } align(bottom, text(14pt, strong(date)) ) }) pagebreak(to: "odd") /* POST-COVER CONTENT FORM SETUP */ // Set heading sizes and spacings set heading(numbering: "1.1") show heading: set block(above: 2.2em, below: 1.5em) show heading.where(level: 1): set text(size: 20pt) show heading.where(level: 2): set text(size: 16pt) show heading.where(level: 3): set text(size: 14pt) // Bookmark outlines (indices) in the generated PDF show outline: set heading(bookmarked: true) // Make the Lists of Figures/Tables/... more concise and easier to read, like in the LaTeX templates show outline.entry: it => context { if it.element.has("kind") { let loc = it.element.location() if counter(figure.where(kind: it.element.kind)).at(loc).first() == 1 { v(1em) } show link: set text(rgb("000000")) link(loc, box(it.body.children.at(2), width: 2.6em) // figure numbering + it.body.children.slice(4).join() // figure caption + box(it.fill, width: 1fr) + it.page ) } else { it } } // Allow a caption in an outline to receive different treatment from the original (see flex-caption in utils.typ) let in-outline = state("in-outline", false) show outline: it => { in-outline.update(true) it in-outline.update(false) } // Chapter-relative numbering for figures (see figure-numbering.typ) show: set-figure-numbering.with(new-format: "1.1") // Gap between figure and caption set figure(gap: 1em) // Better Portuguese name for listings show figure.where(kind: raw): set figure(supplement: STRING_CODE) // Optional new figure type show figure.where(kind: "algorithm"): set figure(supplement: STRING_ALGORITHM) // Put captions on top for non-image figures set figure.caption(position: top) show figure.where(kind: image): set figure.caption(position: bottom) // Color of both normal and reference links show link: set text(rgb("696969")) show ref: set text(rgb("696969")) // Justification and spacing of main text // Note: Per IST rules, line spacing needs to be "1.5 lines". The definition of line spacing is very ambiguous across platforms, and "leading: 1.05em" recreates the 1.5 of the LaTeX templates (or misses it by a microscopic amount). Note that "em" means the font size. 
set par( justify: true, first-line-indent: 1.5em, leading: 1.05em ) set block(spacing: 2.5em) set par(spacing: 1.05em) set list(indent: 2em) // Size and line spacing of footnotes (.7 font size = 1 "line"; explanation above) show footnote.entry: set text(size: 9pt) set footnote.entry(gap: .7*9pt) show footnote.entry: set par(leading: .7*9pt) /* POST-COVER CONTENT */ // Initial page numbering set page( footer: [ #set align(center) #set text(9pt) #context counter(page).display("i") ] ) counter(page).update(1) // Declaration page heading(STRING_DECLARATION_TITLE, numbering: none, outlined: false) text(STRING_DECLARATION_BODY) pagebreak(to: "odd") // Acknowledgments page (recall the included-content array from main.typ) if not hide-acknowledgments { heading(STRING_ACKNOWLEDGMENTS, numbering: none, outlined: false, bookmarked: true) included-content.at(0) pagebreak(to: "odd") } // Abstracts and keywords if not hide-abstract { heading("Abstract", numbering: none, outlined: false, bookmarked: true) included-content.at(1) v(1cm) heading("Keywords", level: 2, numbering: none, outlined: false) included-content.at(2) pagebreak(to: "odd") heading("Resumo", numbering: none, outlined: false, bookmarked: true) included-content.at(3) v(1cm) heading("Palavras Chave", level: 2, numbering: none, outlined: false) included-content.at(4) pagebreak(to: "odd") } // Outlines { show outline.entry.where(level: 1): it => { set text(weight: "bold") set outline(fill: none) v(12pt) show link: set text(rgb("000000")) link(it.element.location(), it.body + h(1fr) + it.page) } outline( title: STRING_OUTLINE, indent: auto ) } if not hide-figure-list { pagebreak(to: "odd") outline( title: STRING_OUTLINE_FIGURES, target: figure.where(kind: image) ) } if not hide-table-list { pagebreak(to: "odd") outline( title: STRING_OUTLINE_TABLES, target: figure.where(kind: table) ) } if not hide-algorithm-list { pagebreak(to: "odd") outline( title: STRING_OUTLINE_ALGORITHMS, target: figure.where(kind: "algorithm") ) } if not hide-code-list { pagebreak(to: "odd") outline( title: STRING_OUTLINE_CODE, target: figure.where(kind: raw) ) } if not hide-glossary { pagebreak(to: "odd") { set heading(numbering: none, outlined: false, bookmarked: true) included-content.at(5) } } pagebreak(to: "odd") // "Figure x:" in bold // (putting here because glossarium uses figure captions for the entries) show figure.caption: it => { let sup = if it.supplement != none [#it.supplement~] let num = if it.numbering != none { context it.counter.display(it.numbering) } strong(sup + num + it.separator) + it.body } // Reset page numbering in Arabic numerals set page( footer: [ #set align(center) #set text(9pt) #context counter(page).display("1") ] ) counter(page).update(1) // Ready body }
https://github.com/TechnoElf/mqt-qcec-diff-thesis
https://raw.githubusercontent.com/TechnoElf/mqt-qcec-diff-thesis/main/content/implementation.typ
typst
= Implementation This section discusses the solutions developed in this work. First, Dijkstra's algorithm, Myers' algorithm and the patience algorithm are elaborated upon. A pseudocode representation of the chosen diff algorithms is presented for this purpose. Next, the development of a visualisation tool for edit scripts of quantum circuits, Kaleidoscope, is explored. The adaptation of this code into a functional equivalence checking flow is also discussed. Furthermore, the section presents the newly developed benchmarking framework for @mqt @qcec. Finally, possible heuristics that can process edit scripts for improved performance are examined. #include "implementation/algorithms.typ" #include "implementation/visualisation.typ" #include "implementation/application.typ" #include "implementation/benchmarking.typ" #include "implementation/postprocessing.typ"
https://github.com/jgm/typst-hs
https://raw.githubusercontent.com/jgm/typst-hs/main/test/typ/text/raw-code-00.typ
typst
Other
#set page(width: 180pt) #set text(6pt) ```typ = Chapter 1 #lorem(100) #let hi = "<NAME>" #show heading: emph ```
https://github.com/KVM-Explorer/AssignmentTemplate
https://raw.githubusercontent.com/KVM-Explorer/AssignmentTemplate/main/README.md
markdown
# AssignmentTemplate A Typst template for assignments and reports. ![Template preview](show.png) Import the template to configure the basic document information; everything else can be used as normal: ```typst #import "template.typ": * #show: template.with( title: [Full Title], short_title: "Short Title", description: [ ], date: datetime(year: 2024, month: 07, day: 15), authors: ( ( name: "xxx", ), ), affiliations: ( (id: "1", name: "University"), ), bibliography_file: "ref.bib", paper_size: "a4", cols: 1, text_font: "XCharter", code_font: "Cascadia Mono", accent: black, ) ``` ## Demo Clone the repository locally and compile it with typst.
https://github.com/SWATEngineering/Docs
https://raw.githubusercontent.com/SWATEngineering/Docs/main/src/2_RTB/PianoDiProgetto/sections/ConsuntivoSprint/TerzoSprint.typ
typst
MIT License
#import "../../const.typ": Re_cost, Am_cost, An_cost, Ve_cost, Pr_cost, Pt_cost #import "../../functions.typ": rendicontazioneOreAPosteriori, rendicontazioneCostiAPosteriori, glossary == Terzo #glossary[sprint] *Inizio*: Venerdì 08/12/2023 *Fine*: Giovedì 14/12/2023 #rendicontazioneOreAPosteriori(sprintNumber: "03") #rendicontazioneCostiAPosteriori(sprintNumber: "03") === Analisi a posteriori Il consuntivo risulta essere significativamente più simile al preventivo di quanto non lo sia stato in precedenza nel numero di ore, segno che la riduzione della durata degli #glossary[sprint] a 1 settimana ha contribuito a rendere la pianificazione più precisa. L'unica discrepanza lieve consiste nell'avere più ore da Amministratore di quante preventivate inizialmente, ma ciò è dovuto semplicemente al fatto che sono state spese risorse aggiuntive per realizzare delle automazioni all'interno del sito vetrina e di determinati documenti. L'adozione di nuove automazioni per redigere i documenti ha inizialmente portato all'emergere del rischio RT2 o Difficoltà nell'adozione degli strumenti di gestione del progetto; tuttavia, l'inclusione di contenuti procedurali e non solo narrativi o descrittivi all'interno delle _Norme di Progetto_ ha abilitato il team ad utilizzare il documento come riferimento in caso di dubbi e a seguire le procedure dettagliate al suo interno, utilizzando le automazioni come prescritto. La seconda misura preventiva, ossia l'inclusione di dubbi nell'#glossary[ordine del giorno] degli incontri interni, non è stata particolarmente utilizzata per dare voce a dubbi sull'utilizzo degli strumenti di gestione; si è rivelato più efficacie semplicemente chiedere informazioni e chiarimenti al componente del team che aveva precedentemente implementato e predisposto tali strumenti.
https://github.com/505000677/2024SpringNote
https://raw.githubusercontent.com/505000677/2024SpringNote/main/tizart.typ
typst
#import "theorems.typ": * #import "@preview/statastic:0.1.0" #let project(title: "", subtitle: "", authors: (), size: 12pt, date: datetime.today(), body) = { // Set the document's basic properties. set document(author: authors.map(a => a.name), title: title) set page(numbering: "1", number-align: center) set text(font: ( "Linux Libertine", "Source Han Serif SC", "Source Han Serif", // "Twitter Color Emoji Regular" ), lang: "en", size: size) // show math.equation: set text(font: ( // )) // set par(first-line-indent: 1.8em) // Title row. align(center)[ #block(text(weight: 700, 1.75em, title)) ] if subtitle != "" { align(center)[ #block(text(1.25em, subtitle)) ] } align(center)[ #v(1em, weak: true) #date.display("[month repr:short] [day], [year]") ] // Author information. pad( top: 0.5em, bottom: 0.5em, x: 2em, grid( columns: (1fr,) * calc.min(3, authors.len()), gutter: 1em, ..authors.map(author => align(center)[ *#author.name* \ #author.email ]), ), ) // Main body. set par(justify: true) body } #let theorem = thmbox("Theorem").with(fill: rgb("#eeffee")) #let 定理 = thmbox("定理").with(fill: rgb("#eeffee")) #let lemma = thmbox("Lemma").with(fill: rgb("#eeffee")) #let 引理 = thmbox("引理").with(fill: rgb("#eeffee")) #let prop = thmbox("Proposition").with(fill: rgb("#eeffee")) #let 命题 = thmbox("命题").with(fill: rgb("#eeffee")) #let corollary = thmbox("Corollary").with(fill: rgb("#eeffee")) #let 推论 = thmbox("推论").with(fill: rgb("#eeffee")) #let question = thmbox("Question").with(fill: rgb("#eeffee")) #let 问题 = thmbox("问题").with(fill: rgb("#eeffee")) #let definition = thmbox("Definition").with(fill: rgb("#f1f8f6")) #let 定义 = thmbox("定义").with(fill: rgb("#f1f8f6")) #let example = thmbox("Example").with(fill: rgb("#f7f7fd")) #let 例子 = thmbox("例子").with(fill: rgb("#f7f7fd")) #let counterexample = thmbox("Counterexample").with(fill: rgb("#fff7f7")) #let 反例 = thmbox("反例").with(fill: rgb("#fff7f7")) #let clarification = thmbox("Clarification").with(fill: rgb("#fff7f7")) #let 澄清 = thmbox("澄清").with(fill: rgb("#fff7f7")) #let observation = thmbox("Observation").with(fill: rgb("#fff7f7")) #let exercise = thmbox("Exercise").with(fill: rgb("#fdeee2")) #let 练习 = thmbox("练习").with(fill: rgb("#fdeee2")) #let mybot = box(sym.bot) #let myat = box($@$) #let emsp = h(2em) #let proof = thmplain("Proof").with( base: "theorem", bodyfmt: body => [#body #h(1fr) $square$], numbering: none ) #let 证明 = thmplain("证明").with( base: "定理", bodyfmt: body => [#body #h(1fr) $square$], numbering: none ) #let implies = sym.arrow.r.double.long #let iff = sym.arrow.l.r.double.long #let stepsTo = sym.arrow.r.bar.long #let yuanxiang = (name: "<NAME>", email: "<EMAIL>")
https://github.com/KaiserY/mdbook-typst-pdf
https://raw.githubusercontent.com/KaiserY/mdbook-typst-pdf/main/src/assets/template.typ
typst
Apache License 2.0
#set text( lang: "zh", font: ( "Noto Sans", "Noto Sans SC", "Noto Sans KR", "Noto Sans Thai", "Noto Sans Arabic", "Noto Sans Hebrew", "Noto Sans Devanagari", "Noto Emoji", ), ) #show link: underline #show raw.where(block: true): block.with( width: 100%, fill: luma(240), inset: 10pt, radius: 4pt, ) #show quote.where(block: true): block.with( width: 100%, fill: rgb("#f1f6f9"), inset: 10pt, radius: 4pt, ) #set page( header: context { if counter(page).get().first() > 1 [ MDBOOK_TYPST_PDF_TITLE ] }, footer: context { if counter(page).get().first() > 1 [ #counter(page).display( "1/1", both: true, ) ] }, ) #align(center, text(17pt)[ *MDBOOK_TYPST_PDF_TITLE* ]) #pagebreak() #outline(depth: 2, indent: 1em) #pagebreak() /**** MDBOOK_TYPST_PDF_PLACEHOLDER ****/
https://github.com/ClazyChen/Table-Tennis-Rankings
https://raw.githubusercontent.com/ClazyChen/Table-Tennis-Rankings/main/history_CN/2024/MS-04.typ
typst
#set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (1 - 32)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [1], [王楚钦], [CHN], [3559], [2], [樊振东], [CHN], [3411], [3], [梁靖崑], [CHN], [3353], [4], [马龙], [CHN], [3286], [5], [菲利克斯 勒布伦], [FRA], [3258], [6], [林高远], [CHN], [3231], [7], [林昀儒], [TPE], [3210], [8], [张本智和], [JPN], [3167], [9], [周启豪], [CHN], [3121], [10], [林诗栋], [CHN], [3118], [11], [张禹珍], [KOR], [3093], [12], [蒂姆 波尔], [GER], [3084], [13], [雨果 卡尔德拉诺], [BRA], [3082], [14], [李尚洙], [KOR], [3058], [15], [特鲁斯 莫雷加德], [SWE], [3047], [16], [邱党], [GER], [3040], [17], [田中佑汰], [JPN], [3033], [18], [林钟勋], [KOR], [3025], [19], [达科 约奇克], [SLO], [3002], [20], [西蒙 高兹], [FRA], [2994], [21], [户上隼辅], [JPN], [2988], [22], [迪米特里 奥恰洛夫], [GER], [2987], [23], [基里尔 格拉西缅科], [KAZ], [2986], [24], [马克斯 弗雷塔斯], [POR], [2975], [25], [向鹏], [CHN], [2969], [26], [松岛辉空], [JPN], [2965], [27], [乔纳森 格罗斯], [DEN], [2965], [28], [克里斯坦 卡尔松], [SWE], [2961], [29], [孙闻], [CHN], [2953], [30], [卡纳克 贾哈], [USA], [2949], [31], [赵大成], [KOR], [2941], [32], [帕特里克 弗朗西斯卡], [GER], [2941], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (33 - 64)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [33], [贝内迪克特 杜达], [GER], [2935], [34], [黄镇廷], [HKG], [2923], [35], [刘丁硕], [CHN], [2916], [36], [于子洋], [CHN], [2914], [37], [篠塚大登], [JPN], [2914], [38], [斯蒂芬 门格尔], [GER], [2911], [39], [梁俨苧], [CHN], [2901], [40], [周恺], [CHN], [2900], [41], [吴晙诚], [KOR], [2895], [42], [庄智渊], [TPE], [2889], [43], [薛飞], [CHN], [2875], [44], [托米斯拉夫 普卡], [CRO], [2874], [45], [徐瑛彬], [CHN], [2874], [46], [赵子豪], [CHN], [2872], [47], [蒂亚戈 阿波罗尼亚], [POR], [2868], [48], [奥维迪乌 伊奥内斯库], [ROU], [2864], [49], [吉村真晴], [JPN], [2857], [50], [安东 卡尔伯格], [SWE], [2850], [51], [艾利克斯 勒布伦], [FRA], [2846], [52], [宇田幸矢], [JPN], [2840], [53], [夸德里 阿鲁纳], [NGR], [2837], [54], [奥马尔 阿萨尔], [EGY], [2834], [55], [马蒂亚斯 法尔克], [SWE], [2829], [56], [卢文 菲鲁斯], [GER], [2809], [57], [雅克布 迪亚斯], [POL], [2809], [58], [上田仁], [JPN], [2806], [59], [徐海东], [CHN], [2804], [60], [袁励岑], [CHN], [2802], [61], [安德斯 林德], [DEN], [2793], [62], [ROBLES Alvaro], [ESP], [2788], [63], [冯翊新], [TPE], [2787], [64], [NOROOZI Afshin], [IRI], [2784], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (65 - 96)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [65], [沙拉特 卡马尔 阿昌塔], [IND], [2781], [66], [帕纳吉奥迪斯 吉奥尼斯], [GRE], [2778], [67], [<NAME>], [GER], [2776], [68], [尼马 阿拉米安], [IRI], [2774], [69], [高承睿], [TPE], [2772], [70], [吉村和弘], [JPN], [2770], [71], [<NAME>], [FRA], [2769], [72], [LAKATOS Tamas], [HUN], [2762], [73], [IONESCU Eduard], [ROU], [2761], [74], [安宰贤], [KOR], [2759], [75], [及川瑞基], [JPN], [2754], [76], [诺沙迪 阿拉米扬], [IRI], [2753], [77], [木造勇人], [JPN], [2751], [78], [牛冠凯], [CHN], [2750], [79], [利亚姆 皮切福德], [ENG], [2749], [80], [赵胜敏], [KOR], [2746], [81], [<NAME>], [KOR], [2743], [82], [安德烈 加奇尼], [CRO], [2741], [83], [<NAME>], [POL], [2739], [84], [曾蓓勋], [CHN], [2737], [85], [神巧也], [JPN], [2736], [86], [<NAME>], [AUT], [2729], [87], [<NAME>], [ITA], [2729], [88], [吉山僚一], [JPN], [2725], [89], [曹巍], [CHN], [2724], [90], [PERSSON Jon], [SWE], [2720], [91], [CHEN Yuanyu], [CHN], [2718], [92], [ORT Kilian], [GER], [2714], [93], [<NAME>], [POL], [2714], [94], [<NAME>], [IND], [2701], [95], [MONTEIRO Joao], [POR], [2701], [96], [汪洋], [SVK], [2698], ) )#pagebreak() #set text(font: ("Courier New", "NSimSun")) #figure( caption: "Men's Singles (97 - 128)", table( columns: 4, [排名], [运动员], [国家/地区], [积分], [97], [STUMPER Kay], [GER], 
[2696], [98], [廖振珽], [TPE], [2696], [99], [艾曼纽 莱贝松], [FRA], [2692], [100], [<NAME>], [SWE], [2689], [101], [<NAME>], [POR], [2682], [102], [王臻], [CAN], [2679], [103], [<NAME>], [BEL], [2679], [104], [<NAME>], [IND], [2678], [105], [<NAME>], [PRK], [2676], [106], [<NAME>], [FRA], [2675], [107], [<NAME>], [ROU], [2673], [108], [<NAME>], [POL], [2671], [109], [MATSUDAIRA Kenji], [JPN], [2670], [110], [ABDEL-AZIZ Youssef], [EGY], [2670], [111], [弗拉迪斯拉夫 乌尔苏], [MDA], [2667], [112], [<NAME>], [FRA], [2661], [113], [<NAME>], [FRA], [2661], [114], [<NAME>], [SWE], [2654], [115], [ZELJKO Filip], [CRO], [2654], [116], [RASSENFOSSE Adrien], [BEL], [2653], [117], [<NAME>], [CZE], [2653], [118], [<NAME>], [SLO], [2650], [119], [特里斯坦 弗洛雷], [FRA], [2649], [120], [<NAME>], [ROU], [2647], [121], [<NAME>], [FRA], [2646], [122], [<NAME>], [AUT], [2645], [123], [王晨策], [CHN], [2644], [124], [<NAME>], [CZE], [2641], [125], [<NAME>], [KOR], [2640], [126], [卢卡 姆拉德诺维奇], [LUX], [2639], [127], [<NAME>], [CRO], [2637], [128], [<NAME>], [CHN], [2637], ) )
https://github.com/danfunc/UseTYPST.cmake
https://raw.githubusercontent.com/danfunc/UseTYPST.cmake/main/subdir/subdir_example.typ
typst
#emph[Hello Subdir!] \ #emoji.face \ #"hello".len()
https://github.com/mrknorman/evolving_attention_thesis
https://raw.githubusercontent.com/mrknorman/evolving_attention_thesis/main/05_parameters/05_the_problem_with_parameters.typ
typst
#set page(numbering: "1", number-align: center) #set math.equation(numbering: it => {[5.#it]}) #counter(math.equation).update(0) #import "../notation.typ": matrixn = Dragonn: Exploring Deep Gravitational-Wave Classifier Hyperparameter Space with Genetic Algorithms <dragonn-sec> == The Problem with Parameters <hyperparameters-section> An applicable machine-learning approach can be found for almost every problem in gravitational-wave data science @gw_machine_learning_review. That does not mean that machine learning should be applied to every problem in gravitational-wave data science. We must be careful with a liberal application of machine learning approaches and always keep the goal in mind: what exactly are we trying to achieve by applying this particular method? As described in the "No free lunch theorem" @no_free_lunch, for every possible algorithm, there are advantages and disadvantages, and there is no algorithm that completely supersedes another in all cases. This means a rigorous and systematic method for comparing different techniques is required. This problem is confounded with deep learning techniques, as the number of free parameters when designing and optimising artificial neural networks is vast --- technically infinite in the non-real case where network size is not a constraint. There are a huge number of adaptations that can be applied to a network @architecture_review @deep_learning_review @conv_review @attention_is_all_you_need, and the number of developed layer types and model architectures is considerable and increasing in an almost exponential fashion year on year @exponential_growth. Even ignoring the number of different types of network modifications, most modifications have multiple associated parameters, including parameters specifying the design of individual network layers @cnn_review @attention_is_all_you_need @dropout_ref @batch_normalisation_ref. We label any parameter to do with the model design or any parameter that is not optimized during the model training process as a *hyperparameter* @hyperparameter_optimisation_review @cnn_hyperparameters. Hyperparameters include values to do with the greater structure of the network, such as the type and number of layers in a network and the configuration of the layers themselves, such as the number of neurons in a dense layer, or the number of filters in the convolutional layer; the training of the network, such as the learning rate, the number of epochs, and the optimiser; as well as all the parameters associated with the training dataset @hyperparameter_optimisation_review @cnn_hyperparameters. Essentially, hyperparameters encompass all parameters that must be determined before the initiation of model training. This chapter will first give a brief overview of available hyperparameter optimisation methods, then discuss why evolutionary population-based methods were chosen as the hyperparameter optimisation technique of choice, followed by a demonstration of the use of hyperparameter optimisation to find interesting parts of hyperparameter space for further exploration. We will conclude by discussing how this work has been pivotal in developing MLy @MLy, a machine learning pipeline currently preparing for live deployment in the latter half of the fourth joint observing run. The goal of any given hyperparameter optimisation process is to maximise the model's performance given a specific *objective function* @hyperparameter_optimisation_review @cnn_hyperparameters. 
This objective function could be as simple as minimising the model loss, but other performance metrics might also be important to us, such as model inference time or memory usage --- or, as is the case for gravitational wave transient detection, minimising values that it would not necessarily make sense to have as part of the loss function, like the False Alarm Rate (FAR) @false_alarm_rate_ref. If we naively gave models a loss function that only allowed a once-in-one-hundred-year FAR, they might never produce a positive result at all @false_alarm_rate_ref. It would be hard to balance such a low FAR requirement with other terms in the loss function, and balancing loss function terms is always a difficult challenge that can lead to training instability. If one is to compare two different sets of architectures, for example, comparing fully connected networks @perceptron_and_neural_network_chapter to networks with some convolutional layers @deep_learning_review @conv_review, a method must be used to determine all of these hyperparameters. Many, if not most, of these hyperparameters will have some effect, ranging from small to significant, on the model's overall performance @hyperparameters_model_performance. Thus, just like the tunable parameters of the model itself, the vector space defined by these hyperparameters comprises regions of different model performances, and indeed model performance can be measured in multiple ways. Presumably, given the task at hand, there will be some region within this parameter space that maximises desired performance goals. In the optimal scenario, the comparison of two sets of architectures will occur between these regions. Thus, a method is needed to find approximate values for these optimal hyperparameters. We might now see the recursion that has started. We are applying an optimisation method to an optimisation method, which will introduce its own set of hyperparameters. Such hyperparameters will, in turn, need to be at least selected if not optimised. However, it can be shown that the selection of network hyperparameters can have a profound impact @hyperparameters_model_performance on the performance of the model. It is hoped that with each optimisation layer, the effects are considerably diminished, meaning that roughly tuned hyperparameters for the hyperparameter optimiser are sufficient to find comparably optimised solutions. We can use a similar example parameter space to the one that we generated in @gradient-descent-sec, except this time it is being used to represent the hyperparameter space against the model objective function, rather than parameter space against the model loss. See @hyperparameter_space. #figure( image("hyperparameter_space.png", width: 90%), caption: [An example arbitrary hyperparameter space generated from a random mixture of Gaussians. The space presented here is 2D. In actuality, the space is likely to have a much larger dimensionality. Unlike in gradient descent, where we are trying to minimise our loss, here we are trying to maximise our objective function, whatever we have determined that to be.] ) <hyperparameter_space> Perhaps unsurprisingly, hyperparameter optimisation is an area of considerable investigation and research in machine learning @hyperparameter_optimisation_review. However, similar to the rest of the field, it would be incorrect to call it well-understood. 
Whilst there are several effective methods for hyperparameter optimisation, there is no universally accepted set of criteria for which method to use for which problems. What follows is a brief, non-comprehensive review of currently available hyperparameter optimisation techniques. === Human-guided trial and error The most straightforward and obvious method to find effective model hyperparameters relies on human-guided trial and error. This method, as might be expected, involves a human using their prior assumptions about the nature of the problem, the dataset, and the model structure to roughly guide them towards an acceptable solution, using multiple trials to rule out ineffective combinations and compare the results to their hypothesised intuitions. Whilst this technique is simple to implement and can be time efficient, it suffers from several deficiencies. The results of this method can vary in effectiveness depending on the previous experience of the guiding human; if they have a lot of experience with prior optimisation tasks, they are likely to have more effectively tuned priors. It is also possible that an experienced optimiser might have overly tuned priors, and this bias might cause them to miss possible new solutions that were either previously overlooked or are only relevant to the particular problem being analysed. The results of this method also suffer from a lack of consistency; even the most experienced human optimiser is unlikely to apply precisely the same optimisation procedure across multiple problems. Despite these weaknesses, this method is commonly used throughout gravitational wave machine-learning papers @gabbard_messenger_cnn @george_huerta_cnn and can still be an effective solution for isolated optimisation. === Grid Search A more methodical approach is to perform a grid search across the entirety or a specified subsection of the available parameter space @hyperparameter_optimisation_review. In this method, a grid of evenly spaced points is distributed across the selected parameter space. A trial is performed at each grid point, and the performance results of those trials are then evaluated. Depending on the computing power and time available, this process can be recursed between high-performing points. This method has the advantage of performing a much more rigorous search over the entirety of the parameter space. However, it can be highly computationally expensive if the parameter space has large dimensionality, which is often the case. A grid search can also be ineffective at finding an optimal solution if the objective function is non-linear and highly variable with minor changes, or, evidently, if the optimal solution lies outside the initial boundaries. See @grid_search for an example grid search. #figure( image("grid_search.png", width: 90%), caption: [ An example of the samples a grid search might use to find an optimal hyperparameter solution.] ) <grid_search> === Random Search Random search is very similar to a grid search; however, instead of selecting grid points evenly spaced across the parameter space, it randomly selects points from the entirety of the space @hyperparameter_optimisation_review. It has similar advantages and disadvantages to grid search, and with infinite computing resources, both would converge on the ground truth value for the objective function. However, random search has some benefits over grid search that allow it to more efficiently search the parameter space with fewer evaluations. A toy sketch contrasting the two strategies follows below.
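To make the contrast concrete, the following minimal Python sketch runs both strategies over the same toy two-dimensional hyperparameter space with an equal budget of 25 trials. The objective function, names, and ranges are illustrative stand-ins, not part of any real pipeline.

```python
# Toy comparison of grid search and random search. The objective is an
# arbitrary smooth surface standing in for "train a model, return its
# score"; nothing here reflects a real training run.
import itertools
import random

def objective(learning_rate, num_layers):
    # A single optimum near learning_rate = 0.01, num_layers = 4.
    return -1e4 * (learning_rate - 0.01) ** 2 - (num_layers - 4) ** 2

def grid_search(points_per_axis=5):
    rates = [10 ** (-4 + 3 * i / (points_per_axis - 1)) for i in range(points_per_axis)]
    depths = range(1, 11, 10 // points_per_axis)  # 1, 3, 5, 7, 9
    trials = [(r, d, objective(r, d)) for r, d in itertools.product(rates, depths)]
    return max(trials, key=lambda t: t[2])

def random_search(num_trials=25, seed=0):
    rng = random.Random(seed)
    trials = []
    for _ in range(num_trials):
        rate = 10 ** rng.uniform(-4, -1)  # sample the exponent, not the raw rate
        depth = rng.randint(1, 10)
        trials.append((rate, depth, objective(rate, depth)))
    return max(trials, key=lambda t: t[2])

print("grid best:  ", grid_search())
print("random best:", random_search())
```

Note that the grid here can never land on the optimal depth of 4, because that value falls between its evenly spaced points, whereas the random search repeats no value along either axis and may sample arbitrarily close to the optimum.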
When performing a grid search, the separation of grid points is a user-defined parameter, which both introduces a free parameter and creates possible dimensional bias. A grid search will also evaluate the same value of any given hyperparameter many times, since that value recurs at every point along its grid axis, whereas a random search will rarely repeat a sample for any hyperparameter. It should also be noted that some statistical uncertainty will be introduced, which would not be present in the case of a grid search and might limit the comparability of different approaches. Both the random and grid search techniques have the disadvantage that all samples are independently drawn, and unless the processes are recursed, no information from the performance results can influence the selection of new points. See @random_search. #figure( image("random_search.png", width: 90%), caption: [ An example of the samples a random search might use to find an optimal hyperparameter solution.] ) <random_search> === Bayesian Optimisation A Bayesian optimisation approach makes use of our initial beliefs, our priors, about the structure of the objective function @hyperparameter_optimisation_review. For example, you might expect the objective function to be continuous and that closer points in the parameter space might have similar performance. The objective function is estimated probabilistically across the parameter space. It is updated as more information is gathered by new samples, which can be tested either in batches or one at a time. The information obtained by these new samples is incorporated into the estimated objective function to move it closer to the ground truth objective function. The placement of samples is determined by a combination of the updated belief and a defined acquisition function, which determines the trade-off between exploration and exploitation. The acquisition function assigns each point in the parameter space a score based on its expected contribution to the optimisation goal, effectively directing the search process. A standard method for modelling the objective function in Bayesian optimisation is Gaussian processes, but other techniques are available, such as random forests and Bayesian neural networks, among others. This optimisation technique is often employed when evaluating the objective function is expensive or time-consuming, as it aims to find the optimal solution with as few evaluations as possible. See @bayesian_descent_hp_optimisation. #figure( image("bayesian_descent.png", width: 90%), caption: [ An example of the samples a Bayesian optimisation might use to find an optimal hyperparameter solution. The descent method shown here has used a Gaussian process to attempt to find the objective function maximum but has not done so particularly successfully. The method was not tuned to increase performance, as it was just for illustrative purposes.] ) <bayesian_descent_hp_optimisation> === Gradient-Based Optimisation In some rare cases, it is possible to find optimal model hyperparameters using a similar method to the one we used to determine model parameters during model training @hyperparameter_optimisation_review. We can treat the hyperparameter space as a surface and perform gradient descent (or, in this case, ascent, which follows the same principles in reverse). Since gradient descent was already discussed in some detail in @gradient-descent-sec, we will not repeat ourselves here. 
The advantage of gradient-based optimisation is that it can utilise extremely powerful gradient-descent mechanisms, which we have seen are potent optimisers. The major disadvantage, however, is that for most hyperparameters, it is not possible to calculate the gradient. There are workarounds in some specific scenarios and much research has gone into making gradients available, but such work is still in early development and not applicable in many scenarios; thus we limit our discussion to this paragraph. === Population-Based Methods The final category of hyperparameter optimisation methods that we will discuss, and the one that we have chosen to employ in our search for more optimal classifiers, is population-based methods @hyperparameter_optimisation_review_2. These come in a variety of different subtypes, the most prominent of which are perhaps evolution-based methods, such as genetic algorithms. Population-based methods are any methods that trial several solutions before iterating, or iterate several solutions in parallel, as opposed to trialling one solution and then basing the next solution on its results. Technically, since they trial a number of solutions before iteration, both random and grid searches could be considered population-based methods with only one step, although they are not usually included. Since we have chosen to adopt a method from this group, we will review some of the subtypes. ==== Genetic Algorithms For our hyperparameter search, we have chosen to implement genetic algorithms, a population-based evolutionary method @genetic_algotrithm_review @hyperparameter_optimisation_review @hyperparameter_optimisation_review_2. Genetic algorithms are inspired by the principle of survival of the fittest found in nature within Darwinian evolution @genetic_algotrithm_review. They require the ability to list and freely manipulate the parameters we wish to optimise (in our case, our hyperparameters). Continuing with the biological analogy, these parameters are labelled as a given solution's *genes*, $g_i$, the complete set of which is the solution's *genome*, $G_i$. We must also be able to test any genome, measuring how well a solution generated with that genome satisfies our objective function. We must be able to condense these measurements into a single performance metric --- the *fitness* of that solution. Any optimisation problem that fits these wide criteria can be attempted with genetic algorithms, meaning they are a flexible optimisation solution. Our problem, the hyperparameter optimisation of deep learning models, fits both criteria; thus, genetic algorithms are an applicable method for the task. Initially, a number of genomes, $N$, are randomly generated within predefined parameter space limits @genetic_algotrithm_review. All possible gene combinations must produce a viable genome, or a mechanism must be in place to return a fitness of zero if a solution is attempted with an invalid genome. A solution (in our case, a model) is generated for each of the $N$ genomes. This set of solutions forms the population. Every member of the population is trialled (in our case, the model is trained) either sequentially or in parallel, depending on the available computational resources and the scope of the problem. In the basic genetic algorithm case, each trial within a generation is independent and cannot affect another member of the population until the next generation. 
After each solution has been trialled, it must be evaluated (the model is validated) in order to produce a fitness score. This process of generating a set of genomes that defines a population of solutions, and then testing each member of the population to measure its effectiveness, is known as a generation. Multiple generations will be iterated, but the creation of each generation after the first is based on the fitnesses and the genomes of the previous generation, rather than just being randomly generated as in the first generation. Genes and gene combinations that are found in highly-scoring members of the population are more likely to be selected for use in the next generation. After the algorithm has run for a number of generations, possibly determined by some cut-off metric, in theory, you should have produced a very highly-scoring population. You can then select the best-performing model from the entirety of your evolutionary history. It is the selection process between generations that gives the genetic algorithm its optimising power @genetic_algotrithm_review. Unlike grid or random methods, each generation uses information from the previous generation to guide the current generation's trials. There are multiple slightly different variations; we use one of the most common techniques, which is described in more detail in @dragonn-method. As mentioned, genetic algorithms are very flexible; they can be applied to a wide variety of optimisation problems @genetic_algotrithm_review. They can handle almost any objective function and operate in any kind of parameter space, including discrete, continuous, or mixed search spaces @genetic_algotrithm_review. They are also quite robust. Unlike many optimisation solutions which, sometimes rapidly, single out a small area of the parameter space for searching, genetic algorithms perform a more global search over the parameter space. Despite these advantages, they have the significant disadvantage of requiring a large number of trials before converging on a high-performing solution. For this reason, they are less often used for hyperparameter optimisation, as each trial requires model training and validation @hyperparameter_optimisation_review_2. For completeness, we will also discuss several other population-based optimisation techniques. ==== Differential Evolution Like genetic algorithms, differential evolution methods are a form of evolutionary algorithm, but rather than generating a new population based on a selection of genes from the previous generation, differential evolution instead generates new parameters based on differentials between current solutions @hyperparameter_optimisation_review_2 @differential_evoloution. This means that genes in the current generation are not necessarily anything like genes in the previous generation --- parameters are treated in a vector-like manner rather than discretely. Differential evolution can work well for continuous parameter spaces, and shares many of the advantages of genetic algorithms as well as sometimes converging more quickly; however, it deals less well with discrete parameters than genetic algorithms and is less well studied, so understanding of its operation is not as well developed @differential_evoloution. When optimising our models, we use a mix of discrete and continuous values, so we could not apply differential evolution to all hyperparameters, making differential evolution tricky to employ; nonetheless, the core update step is simple, as the sketch below shows. 
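For illustration, here is a minimal sketch of one generation of the classic DE/rand/1 update with binomial crossover, restricted to continuous parameters. The objective and all values are hypothetical stand-ins, not part of Dragonn or GravyFlow, and we maximise to match the fitness convention used in this chapter.

```python
# Sketch of one generation of differential evolution (DE/rand/1 with
# binomial crossover) over continuous parameters only.
import random

def de_generation(population, objective, rng, f=0.8, cr=0.9):
    next_population = []
    for i, target in enumerate(population):
        others = [p for j, p in enumerate(population) if j != i]
        a, b, c = rng.sample(others, 3)
        # Mutant vector: one member perturbed by the scaled difference
        # of two others -- the "differential" that names the method.
        mutant = [ak + f * (bk - ck) for ak, bk, ck in zip(a, b, c)]
        # Binomial crossover mixes mutant and target genes.
        trial = [m if rng.random() < cr else t for m, t in zip(mutant, target)]
        # Greedy one-to-one selection between trial and target.
        next_population.append(trial if objective(trial) > objective(target) else target)
    return next_population

rng = random.Random(0)
objective = lambda x: -(x[0] - 1.0) ** 2 - (x[1] + 2.0) ** 2
population = [[rng.uniform(-5.0, 5.0), rng.uniform(-5.0, 5.0)] for _ in range(12)]
for _ in range(50):
    population = de_generation(population, objective, rng)
print(max(population, key=objective))  # converges towards [1.0, -2.0]
```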
==== Particle Swarm Optimisation Particle swarm optimisation is inspired by the emergent behaviour found in swarms of insects, flocks of birds, and schools of fish @hyperparameter_optimisation_review_2. Seemingly without coordination or central intelligence, large numbers of individually acting agents can arrive at a solution to a problem using information from their nearest neighbours @flocks. In particle swarm optimisation, as in genetic algorithms, an initial population is randomly generated and trialled. In this case, each member of the population is called a particle, forming the elements of a swarm. Rather than waiting for the end of each generation in order to update the parameters of each solution, each solution is given a parameter-space velocity which is periodically or continuously updated based on the performance of the other members of the population. Some variations aim to imitate real animal swarms more closely by limiting each particle's knowledge to certain regions, or to improve convergence rates by weighting some particles more highly than others @hyperparameter_optimisation_review_2. Particle swarms can have much quicker convergence than genetic algorithms, due to the continual updates to their trajectory in parameter space. However, effective employment of particle swarms requires that solutions can adjust their parameters quickly, which is not the case for many deep learning hyperparameters, most prominently structural hyperparameters, which would often require retraining the model from scratch after only small changes. == Dragonn Method <dragonn-method> As our attempt to apply genetic algorithms to the problem of deep learning model optimisation in gravitational waves, we introduce Dragonn (Dynamic Ranking And Genetic Optimisation of Neural Networks). Originally a standalone software library developed in C, Dragonn was rewritten in Python utilising other recent advances made in the GravyFlow pipeline. An earlier version was used to optimise the core MLy models, but the data from those early experiments was lost, so a decision was made to redo the experiments with the updated Dragonn tools. In the following subsections, we will justify our selection of genetic algorithms as the hyperparameter optimisation method of choice, explain in detail the operation of genetic algorithms, and discuss the choice of optimiser parameters selected for tests of Dragonn's optimisation ability. === Why Genetic Algorithms? Genetic algorithms are an unusual choice for artificial neural network hyperparameter optimisation and have fallen somewhat out of fashion in recent years, with Bayesian methods taking the limelight. Genetic algorithms typically require many trials before they converge on an acceptable solution, and although they are extremely flexible and adaptable methods, which are easy to implement and fairly straightforward to understand, the computational expense of individual trials of neural network architectures can often make their application prohibitive. Many of the hyperparameters of artificial neural networks are immutable without completely restarting training. While it is possible to adjust the training dataset and training hyperparameters such as the learning rate during model training, there are many hyperparameters related to the network architecture for which training would have to be completely restarted should they be altered, typically reinitialising the model's tunable parameters in the process. 
This means that for each trial during our optimisation campaign, we will have to train a model from scratch, which can be a computationally expensive endeavour, especially if the models are large. More computationally hungry layers, such as the attention-based layers that are discussed in future chapters, would require even more time and resources per trial, making genetic algorithms even more costly. Unfortunately, most of this was not known at the initiation of the project. It should be noted that, at that time, hyperparameter optimisation methods were less developed. As the project developed, however, new ideas emerged for how genetic algorithms could be better adapted to the task. We can imagine some methods to alter model architectural hyperparameters without entirely resetting the tunable weights of the model in the process. For example, we could add an extra convolutional filter to a convolutional layer, randomly initialising only the new parameters and keeping the existing parameters the same; similarly, we could remove a filter. It might also be possible to add and remove entire layers from the model without completely resetting the tunable parameters every time. A method to reuse existing trained parameters was envisioned. Unfortunately, performing such surgery on models compiled using one of the major machine-learning libraries, in our case TensorFlow, is fairly difficult. So although many alternative methods were conceived, none progressed to the point where they were ready for testing (the core tensor manipulation is sketched below). With all that said, population-based methods are far from dead, and they still have some significant advantages over other methods. For extremely complex spaces, with many parameters to optimise, genetic algorithms can be the best available option, though as noted they can take many trials to reach an optimal solution. It should also be noted that, although hyperparameter space can be very high-dimensional, it is usual, in artificial neural network design, for the number of dimensions that are important to model performance to be quite low, meaning that the effective search space is considerably smaller. There are big players in AI who use population-based methods similar to genetic algorithms for model optimisation, including Google DeepMind @deepmind_population_based, so it is hoped that further development of this method could result in a highly adaptable population-based method for the optimisation of neural networks for use in gravitational-wave research. Much of the software has already been developed, and although it would be a complex task, it would be a rewarding one. We have some things in our favour: our input data is not particularly highly dimensional, and the models we are attempting to train are simple Convolutional Neural Networks (CNNs) that are not especially memory or resource intensive, meaning that it should be possible for us to run a relatively large number of trials. The method and software developed for this research have also seen use in a gravitational wave detection pipeline, MLy @MLy, so they have already been useful to the scientific community. The genetic algorithm software included as part of GravyFlow makes it very easy to add new hyperparameters to the optimisation task; those parameters can be continuous, discrete, or boolean. The range of hyperparameters set up for optimisation with Dragonn is already extensive, as is demonstrated in @hyperparameter-seclection-sec. 
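Although, as discussed above, this model surgery never progressed to testing, the core tensor manipulation is straightforward to sketch. The following numpy fragment widens a one-dimensional convolutional layer by a single filter while preserving its trained weights; the (kernel size, input channels, output channels) shape convention and all names here are illustrative assumptions, not GravyFlow or TensorFlow code, and a real implementation would still have to rebuild the compiled model around the new tensors.

```python
# Sketch of widening a 1D convolutional layer by one filter while
# keeping its trained weights, as described above.
import numpy as np

def widen_conv(kernel, bias, next_kernel, rng, scale=0.01):
    k = kernel.shape[0]
    # New output channel for this layer, randomly initialised and small.
    new_filter = rng.normal(0.0, scale, size=(k, kernel.shape[1], 1))
    kernel = np.concatenate([kernel, new_filter], axis=2)
    bias = np.concatenate([bias, np.zeros(1)])
    # The following layer now receives one extra input channel; give it
    # a near-zero slice so the network's existing behaviour is preserved.
    k2, _, c_out = next_kernel.shape
    new_slice = rng.normal(0.0, scale, size=(k2, 1, c_out))
    next_kernel = np.concatenate([next_kernel, new_slice], axis=1)
    return kernel, bias, next_kernel

rng = np.random.default_rng(0)
kernel, bias, nxt = np.ones((3, 8, 16)), np.zeros(16), np.ones((3, 16, 32))
kernel, bias, nxt = widen_conv(kernel, bias, nxt, rng)
print(kernel.shape, bias.shape, nxt.shape)  # (3, 8, 17) (17,) (3, 17, 32)
```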
There has been at least one attempt to use genetic algorithms for hyperparameter optimisation within gravitational wave data science in the past @ga_graviational_waves. Deighan _et al._ share an interest in developing a consistent method for generating hyperparameter solutions, and they use a similar approach to the method described here. They demonstrate that genetic algorithms can indeed generate models with high performance. The work of Deighan _et al._ optimises a reasonable but limited number of hyperparameters, predefining several structural elements of the network. We have allowed our optimiser considerably more freedom, although we note that this could also lead to an increased convergence time. === Selection of Mutable Hyperparameters <hyperparameter-seclection-sec> Genetic algorithms are optimisation methods that can be used to find a set of input parameters that maximise a given fitness function. Often, this fitness function measures the performance of a certain process. In our case, the process being measured is the training and testing of a given set of hyperparameters. The hyperparameters, then, form the parameters that the optimisation method will adjust to find a performant solution. The GravyFlow optimisation model allows us to optimise a wide range of hyperparameters. Whilst we would have liked to perform a large optimisation run over all possible hyperparameters, we elected to use only a subset in order to improve convergence speeds due to time constraints. Model hyperparameters can be split into three categories: *dataset hyperparameters*, *training hyperparameters*, and *architecture hyperparameters*. - *Dataset hyperparameters* control the structure and composition of the training dataset, including its size, the number of examples in each class within the dataset, and the properties of each example. In our case, dataset hyperparameters include the properties of the noise, the signals injected into that noise, and any additional obfuscations we wish to add to the data, like the injection of simulated glitches. It is important to ensure that our optimisation method cannot also adjust the properties of the validation and testing datasets. It would be very easy for the genetic algorithm to find a solution wherein it makes the difference between classes in the validation set as easy as possible to identify, or to make the validation dataset incredibly short, or perhaps remove all but one class of example. If we restrict our optimisation to the training dataset, however, this can be a good way to find optimal hyperparameters. The composition of the training dataset can often be a crucial part of optimising model performance. Unfortunately, we did not run the genetic algorithm on any dataset parameters, in order to save time. The set of possible genes for dataset hyperparameters is shown in @datset-hyperparameters. #figure( table( columns: (auto, auto, auto, auto), [*Hyperparameter Name (gene)*], [*Type*], [*Optimised*], [*Range*], [Sample Rate (Hz)], [Integer], [No], [-], [Onsource Duration (s)], [Integer], [No], [-], [Offsource Duration (s)#super($plus$)], [Integer], [No], [-], [Total Num Examples], [Integer], [No], [-], [Percent Signal], [Float], [No], [-], [Percent Noise], [Float], [No], [-], [Percent Glitch], [Float], [No], [-], [Noise Type], [Discrete], [No], [-], [Whiten Noise? 
($plus$)], [Boolean], [No], [-], [_For each feature type_], [], [], [], [SNR Min\*], [Float], [No], [-], [SNR Max\*], [Float], [No], [-], [SNR Mean\*], [Float], [No], [-], [SNR Median\*], [Float], [No], [-], [SNR Distribution (\*)], [Discrete], [No], [-], ), caption: [Possible dataset hyperparameters. These are parameters that alter the structure and composition of the dataset used to train our model. None of these parameters were selected for inclusion in our hyperparameter optimisation test, in order to decrease convergence time. Parameters with a superscript symbol become active or inactive depending on the value of another parameter for which that symbol is contained within brackets. Range entries are left blank for hyperparameters not included in our optimisation, as no ranges were selected for these values. ] ) <datset-hyperparameters> - *Training hyperparameters* are parameters used by the gradient descent algorithm, which dictate the training procedure of the neural network. These include things like the learning rate, batch size, and choice of optimiser. As with the dataset hyperparameters, these are fairly easy to alter after training has begun without first resetting all of the model's tunable parameters, so could easily be incorporated into a more complex population-based method. None of these parameters were selected for optimisation during this experiment. The set of possible genes for training hyperparameters is shown in @training-hyperparaneters. #figure( table( columns: (auto, auto, auto, auto), [*Hyperparameter Name (gene)*], [*Type*], [*Optimised*], [*Range*], [Batch Size], [Integer], [No], [-], [Learning Rate], [Float], [No], [-], [Choice of Optimiser(\*)], [Discrete], [No], [-], [Various Optimiser Parameters\*], [Discrete], [No], [-], [Num Training Epochs], [Float], [No], [-], [Patience], [Discrete], [No], [-], [Choice of Loss Function], [Discrete], [No], [-] ), caption: [Possible training hyperparameters. These are parameters that alter the training procedure of the model. None of these parameters were selected for inclusion in our hyperparameter optimisation test, in order to decrease convergence time. Parameters with a superscript symbol become active or inactive depending on the value of another parameter for which that symbol is contained within brackets. There are different optimiser parameters that could also be optimised depending on the choice of optimiser, for example, values for momentum and decay. It is not typical to optimise the choice of loss function, but some tasks, such as regression, are possible with a range of loss functions and could benefit from optimising this parameter. Range entries are left blank for hyperparameters not included in optimisation, as no ranges were selected for these values.] ) <training-hyperparaneters> - *Architecture hyperparameters* are parameters that control the number and type of layers in a network. This is by far the most extensive category of hyperparameter, since the layers, whose number and type are themselves controlled by hyperparameters, each contain further hyperparameters. For example, a layer in a network could be any of several types: dense, convolutional, or pooling. If convolutional were selected by the optimiser as the layer type of choice, then the optimiser must also select how many filters to give that layer, the size of those filters, and whether any dilation or stride is used. Each layer also comes with a selection of possible activation functions. This increases the number of hyperparameters considerably. 
In order to allow the optimiser maximal freedom, no restrictions on the order of layers in the network were imposed; any layer in a generated solution could be any of the possible layer types. Another independent hyperparameter selected how many of those layers would be used in the generation of the network, in order to allow for various network depths. The output layer was fixed as a dense layer with fixed output size, to ensure compatibility with label dimensions. The set of possible genes for architecture hyperparameters is shown in @architecture-hyperparameters. #figure( table( columns: (auto, auto, auto, auto), [*Hyperparameter Name (gene)*], [*Type*], [*Optimised*], [*Range*], [Number of Hidden Layers], [Integer], [Yes], [0 to 10], [_One each for each active layer_], [], [], [], [Layer Type], [Discrete], [Yes], [Dense(\*, $plus$), Convolutional(\*, $times$), Pooling($diamond$), Dropout($square$)], [Activation Function\*], [Discrete], [Yes], [ReLU, ELU, Sigmoid, TanH, SeLU, GeLU, Swish, SoftMax], [Num Dense Neurons #super($plus$)], [Integer], [Yes], [1 to 128 (all values)], [Num Filters #super($times$)], [Integer], [Yes], [1 to 128 (all values)], [Kernel Size #super($times$)], [Integer], [Yes], [1 to 128 (all values)], [Kernel Stride #super($times$)], [Integer], [Yes], [1 to 128 (all values)], [Kernel Dilation #super($times$)], [Integer], [Yes], [0 to 64 (all values)], [Pool Size #super($diamond$)], [Integer], [Yes], [1 to 32 (all values)], [Pool Stride #super($diamond$)], [Integer], [Yes], [1 to 32 (all values)], [Dropout Value #super($square$)], [Float], [Yes], [0 to 1 (all values)], ), caption: [Possible architecture hyperparameters. These are parameters that alter the architectural structure of the model, or the internal structure of a given layer. All these parameters were selected for optimisation. Parameters with a superscript symbol become active or inactive depending on the value of another parameter for which that symbol is contained within brackets. For each of the $N$ layers, where $N$ is the value of the number of hidden layers gene, a layer type gene determines the type of that layer, and other hyperparameters determine the internal function of that layer. ] ) <architecture-hyperparameters> These parameters are called genes, $g$. Each set of genes is called a *genome*, $matrixn(G)$. $G = [g_1, ..., g_i, ..., g_x]$, where $x$ is the number of parameters adjustable by our optimiser. Each genome should map to a single fitness score, $F$, via our chosen fitness function. === Genetic Algorithms in Detail Genetic algorithms operate using the following steps; note that this describes the procedure as performed for this dissertation, and slight variations on the method are common: + *Generation:* First, an initial population of genomes, $P$, is generated. $P = [G_1, ..., G_i, ..., G_N]$, where $N$ is the number of genomes in the population. Each genome is randomised, with each gene limited within a search space defined by $g_(i"min")$ and $g_(i"max")$. + *Evaluation:* Next, each genome is evaluated by the fitness function to produce an initial fitness score. In our case, this means that each genome is used to construct a CNN model which is trained and tested. The result of each test is used to generate a fitness score for that genome. + *Selection:* These fitness scores are used to select which genomes will continue to the next generation. 
  There are a few methods for doing this; however, since we do not expect to need any special functionality in this area, we have used the most common selection function: the "Roulette Wheel" method. In this method, the fitness scores are normalised so that the sum of the scores is unity. Then the fitness scores are stacked into bins, with each bin width determined by that genome's fitness score. $N$ random numbers between 0 and 1 are generated, and each genome is selected once for each random number that falls into its bin. Any given genome can be selected multiple or zero times.
+ *Crossover and Mutation:* The genomes that have been selected are then acted upon by two genetic operators, crossover and mutation. Firstly, genomes are randomly paired into groups of two, then two new genomes are created by randomly selecting genes from each parent. A "mutation" is then performed on each of the new genomes with a certain mutation probability $M$. Mutation and crossover create genomes that share elements of both parents, but with enough differences to continue exploring the domain space.
+ *Termination:* If the desired number of generations has been reached, the process ends and the highest-performing solution is returned. Otherwise, the process loops back to step 2, and the newly created genomes are evaluated.

==== Choice of Fitness Function

There are multiple possible variants on the standard genetic algorithm model, but for the most part, we have kept to the generic instantiation. It is a common choice to use the model loss as the fitness metric for optimisation. This makes sense in many ways: the goal of training a model is to reduce its loss function, and a better loss indicates a better model. However, the model loss function often fails to map exactly onto our requirements for the model. The form of the loss function affects the model's training dramatically, so we cannot just use any function we wish as the loss function, and some things we are trying to optimise for might be too expensive to compute during every training iteration, or impossible to compute directly in this manner.

We have chosen to use the area under a FAR-calibrated efficiency curve. Only values above an SNR of 8 were included in the sum, and the FAR chosen was 0.01 Hz, with the assumption that performance at this FAR would translate to performance at a lower FAR. A lower FAR was not directly used because it would be computationally expensive to compute for every trial. This objective function was chosen as it is representative of the results we look for to determine whether our model is performant or not. If those are the results we will be examining, we may as well attempt to optimise them directly.

==== Choice of Crossover Method

There are several potential choices of crossover method: one-point crossover, k-point crossover, or uniform crossover. In one-point crossover, we treat our two genomes, one from each parent, as long arrays, like two DNA strands. The crossover mechanism randomly cuts both strands in two, selecting the first half from one strand and the second half from the other, generating a new genome by splicing the old. This is the simplest approach, which in some cases can lead to faster convergence, but it can reduce the possibility of mixing genomes in interesting ways, reducing the total search space.
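As a concrete illustration of the roulette-wheel selection and one-point crossover operators described above, a minimal generic sketch might look like the following; this is a textbook rendering, not the Dragonn implementation.

```python
import random

def roulette_select(population, fitnesses, n):
    """Select n genomes with probability proportional to fitness."""
    total = sum(fitnesses)
    weights = [f / total for f in fitnesses]  # normalise scores to sum to one
    # Sampling is with replacement, so a genome can be selected
    # multiple times or not at all, as described above.
    return random.choices(population, weights=weights, k=n)

def one_point_crossover(parent_a, parent_b):
    """Cut both genomes at one random point and splice the halves together."""
    cut = random.randrange(1, len(parent_a))
    return parent_a[:cut] + parent_b[cut:]
```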
K-point crossover is similar, but selects multiple places to cut and splices the genome in a more complex manner. This can increase mixing possibilities but can slow convergence, as the new genome is more likely to gain combinations of genes that perform poorly. The final possibility is uniform crossover, which effectively equates to cutting before and after every gene. Each gene in the new genome is randomly selected from either parent; this maximises mixing but can increase convergence time. We selected uniform crossover in order to maximise the possible search space; although we were concerned about increasing convergence times, we wanted to ensure that we explored a wide area of the parameter space effectively.

==== Choice of Mutation Method

As well as crossover, mutation was also performed at the inception of every new genome. Mutation ensures that the population keeps exploring new areas of parameter space even as certain traits dominate the population, by introducing a small chance that a gene can randomly change value. Our method for performing mutation depends on whether the value of that gene is an integer, continuous, discrete, or boolean. In all cases, there is a 5% chance for mutation to occur in any given gene after crossover has taken place. For continuous and integer values, the value of the gene is mutated either negatively or positively by an amount drawn from a Gaussian distribution; in the case of integer parameters, this is then rounded to the nearest integer. For discrete and boolean values, a new value is drawn from the possible selection, with all values being equally likely -- this is different from the integer case, as choices in the discrete category are not ordered.

=== Datasets

The GravyFlow data @gwflow_ref and training pipeline were used to generate the datasets used in each of the trial solutions. We are attempting to detect BBH IMRPhenomD signals generated with cuPhenom @cuphenom_ref and obfuscated by real LIGO interferometer noise drawn from the LIGO Livingston detector. Although GravyFlow lends itself well to use in hyperparameter optimization methods, due to its rapid generation of datasets and lack of requirement for pre-generated datasets, we elected not to optimize dataset parameters, in an attempt to decrease the time until model convergence. Instead, we used identical dataset parameters to those used for the perceptron experiments, but we decreased the training patience to a single epoch, meaning that if any epoch has a validation loss higher than the previous epoch's, training halts. This was done in order to reduce the time taken for each trial. The parameters used for the training and dataset can be seen in @perceptron-training-parameters.

== Dragonn Results

The genetic algorithm work presented in this chapter has been in development for a long time, but this particular iteration only reached its full capabilities in recent months. As a result, time pressure did not allow for a large number of generations to be run. Optimization was performed over four generations, which is very low for a genetic algorithm optimization run. Nonetheless, we can explore the results, and we have made some intriguing discoveries, even if they were somewhat accidental.

== Dragonn Training

First, we can examine the training histories of our models, and note the differences in performance between generations.
@dragon_training_results displays the training results, demonstrating that most of the networks fail to achieve any classification ability. This is expected: as we have allowed complete genetic freedom for layer order and parameters, many nonsensical arrangements of layers are possible, which will inhibit any possibility of classification performance. With disappointment, we note that even in the later generations no models reach accuracies above around 95%. This could, in part, be a result of our reduced training patience halting training early, before extra performance could be extracted, although we note that even in cases where more epochs were reached, the accuracy seems to flatline.

Setting the value of patience to one has other consequences: a great number of the somewhat performant models across the generations were stopped as they reached epoch two, where their validation loss failed to improve on the loss for epoch one. It is unknown exactly why this is the case, since all models were trained on exactly the same training dataset, generated with the same random seed; it could be that the training data in that epoch is particularly unhelpful to the model in some way, through a statistical fluke. The validation datasets are consistent across epochs, so there could not be a variation in validation difficulty causing this hurdle.

Even with the chaotic diagrams, it is easy to see that the number of performant models increases with each generation, so we have verified that our optimiser works --- we will examine average metrics later for verification of this. However, this is not a particularly interesting result; it is known that genetic algorithms work. We do, however, have an interesting result that demonstrates the importance of a future, wide hyperparameter search.

#figure(
  grid(
    columns: 1,
    rows: 4,
    gutter: 1em,
    [ #image("accuracy_generation_1.png", width: 100%) ],
    [ #image("accuracy_generation_2.png", width: 100%) ],
    [ #image("accuracy_generation_3.png", width: 100%) ],
    [ #image("accuracy_generation_4.png", width: 100%) ],
  ),
  caption: [Dragonn model training histories from each of the four generations. All models were trained with identical training datasets and validated with epoch-consistent validation data. After each generation, a new population was generated by applying the genetic algorithm's mechanisms to select performant genes from previous generations. In all generations, many models lack any classification ability; this is anticipated because, given the scope of the hyperparameter search, many of the models generated will be nonsensical, with extremely small data channels or near-complete dropout layers. However, we also see that our population size was enough for a considerable number of performant models. With increasing generations, we see increasing numbers of performant models, demonstrating that our genetic optimiser is operating as intended.]
) <dragon_training_results>

Next, we can examine the average metrics from each generation. In @dragon_averages we examine four metrics of interest: the average maximum model accuracy, that is, the average of the highest accuracies models achieved across their training runs; the average lowest model validation loss; the average number of epochs a training run lasted; and finally the average model fitness. The model fitness is the percentage of correctly classified validation examples with an optimal SNR greater than 8 when using a detection threshold calibrated to a FAR of 0.01 Hz.
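A sketch of how such a FAR-calibrated fitness score might be computed is shown below. The calibration logic is an assumption made for illustration; GravyFlow's actual implementation may differ.

```python
import numpy as np

def far_calibrated_fitness(noise_scores, signal_scores, signal_snrs,
                           far_hz, noise_duration_s):
    """Fraction of injections above SNR 8 recovered at a fixed FAR.

    A threshold is chosen so that scores on pure-noise examples exceed
    it at the requested false-alarm rate; this is a simplified stand-in
    for the calibration performed by the real pipeline.
    """
    n_allowed = int(far_hz * noise_duration_s)
    threshold = np.sort(noise_scores)[-(n_allowed + 1)]
    mask = signal_snrs > 8.0
    return float(np.mean(signal_scores[mask] > threshold))
```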
The metrics show us what we anticipated: increasing average performance in all metrics across generations. The average number of epochs increases as the number of performant models increases, since performant models are more likely to reduce their validation loss over the previous epoch. As expected, an increase in model fitness correlates with an increase in accuracy and a decrease in model loss, suggesting that models which perform better when measured with uncalibrated FAR thresholds and loss functions act, in general, as better-performing models in low-FAR regimes, although this is not always the case, as we will explore when we examine the best-performing models of the generations.

#figure(
  image("averages.png", width: 100%),
  caption: [Dragonn average metrics from each of the four generations. The blue line is the average best model accuracy across its training run; the red line is the average model loss; the purple line is the average number of epochs in a model's training history; and the green line is the average model fitness. These results are mostly as expected: all average metrics improve with increasing generation count. The drop in loss is particularly impressive, but this probably corresponds to the shedding of extremely poorly designed models after the first generation. Accuracy is slowly improving as the number of performant models increases, and with it the average number of epochs in a model's training history. With increasing numbers of performant models come increasing numbers of models that can perform better than their last epoch after further training.]
) <dragon_averages>

The result of an optimization algorithm is only as good as its highest-performing model, so we shall examine the population and extract the most performant models for inspection. Luckily, our choice of objective function --- the percentage of validation examples over an optimal SNR of 8 that are correctly classified when calibrated to a FAR of 0.02 Hz --- encapsulates most of what we desire from our models, so we can use this to guide our search for the best models. We have extracted the top ten scoring models in @top_model_perfomances. Interestingly, and perhaps worryingly for the effectiveness of our optimization method, the top three models are all in the first generation. This tells us that although the average fitness was increasing along with other metrics of interest, that does not necessarily equate to generating more highly scoring models, which seems counterintuitive. However, examining the validation results of the top-scoring model more closely can lead us toward a reason for this discrepancy, and perhaps an interesting area of further investigation.

#figure(
  table(
    columns: (auto, auto, auto),
    [*Rank*], [*Generation*], [*Fitness*],
    [1], [1], [0.9423],
    [2], [1], [0.9337],
    [3], [1], [0.9215],
    [4], [4], [0.888],
    [5], [3], [0.887],
    [6], [2], [0.870],
    [7], [2], [0.868],
    [8], [4], [0.860],
    [9], [1], [0.841],
    [10], [4], [0.841],
  ),
  caption: [The top ten models from any of the populations throughout the genetic optimisation process, out of a total of 800 trial solutions, 200 in each generation. Unexpectedly, the three top-scoring models when ranked by fitness (the very metric our optimisation method is attempting to optimise) are in the first generation.
The first generation of a genetic optimisation search alone acts as a random search, so it is perhaps not surprising that it has some ability to find good solutions; however, we would expect better solutions to arise out of, on average, better-performing populations. This could perhaps be a result of our very low generation count, or a statistical fluke. If it were the latter, however, it would seem very unlikely that the top three spots were all taken by first-generation models. The other option is that there was some asymmetry between the generations.]
) <top_model_perfomances>

In @top_model_perfomance we examine the efficiency plots used to generate the fitness score for the highest-scoring model, a model from the first generation. These efficiency plots show extremely strong performance in the lower FAR regimes at medium SNRs, but this appears to come at the cost of some of the performance at the higher SNRs, where the model does not perform as well as the CNN models from the literature.

#figure(
  grid(
    columns: 1,
    rows: 3,
    gutter: 1em,
    [ #image("best_model_efficiency_0_1.PNG", width: 100%) ],
    [ #image("best_model_efficiency_0_01.PNG", width: 100%) ],
    [ #image("best_model_efficiency_0_001.PNG", width: 100%) ],
  ),
  caption: [Efficiency curves of the top-performing model from the population of Dragonn trials. The curves maintain high accuracy at low FARs; though the model's performance at high SNRs above 10 is worse, never reaching 100% accuracy, its performance at an SNR of 6 is considerably greater. It is hypothesized that this is due to an inoculation effect generated by the erroneous injection of WNB glitches into the dataset during the first generation. _Top:_ Efficiency curve using a threshold calibrated to a FAR of 0.1 Hz. _Middle:_ Efficiency curve generated using a threshold calibrated to a FAR of 0.01 Hz. _Bottom:_ Efficiency curve generated using a threshold calibrated to a FAR of 0.001 Hz. ]
) <top_model_perfomance>

We hypothesize that this effect arises from a mistake made during the first generation. Initially, the plan had been to optimize the dataset parameters at the same time as the model parameters, and Dragonn was set up to allow the optimiser to adjust the percentage of training examples that contained CBC signals; this defaulted to 50%. However, it was also envisioned that the pipeline could add its own synthetic glitches to the dataset, in order to act as counterexamples; this was also set, by default, to inject simulated WNBs into 50% of examples, including ones that also contained a CBC. It was not realised that this had been left in this state until partway through the first generation, where it was rectified; however, due to time pressure, the first trials were not repeated. Considering that the three most performant results were all in the first 50 trials, this oversight seems the likely cause.

The initial idea behind allowing the optimiser to inject simulated glitches was to allow it to act as an inoculant against particularly structured background noise: it would force the network to use the signal morphology, because excess power could also come from glitches, which would not correlate to the signal class. Due to the way the software was set up, these WNBs were also erroneously injected into the validation examples. In situations where a very high-amplitude WNB is injected over the top of a signal, it could obfuscate that signal even if it had quite a high SNR; this effect could be causing the reduction in ability at the higher SNRs.
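For illustration, the kind of glitch injection described above might look something like the sketch below. The burst generator, amplitudes, and the 50% probability are simplified stand-ins for the GravyFlow configuration, not its actual code.

```python
import numpy as np

rng = np.random.default_rng()

def add_wnb(strain, sample_rate_hz=2048.0):
    """Inject a short white-noise burst at a random time (illustrative only)."""
    n = int(rng.uniform(0.05, 0.5) * sample_rate_hz)  # burst length in samples
    start = rng.integers(0, len(strain) - n)
    out = strain.copy()
    out[start:start + n] += rng.uniform(0.5, 2.0) * rng.standard_normal(n)
    return out

def make_example(noise, signal=None, p_glitch=0.5):
    """Build one training example; note that the glitch can land on top of
    a signal, which is the accidental behaviour described in the text."""
    x = noise + signal if signal is not None else noise
    return add_wnb(x) if rng.random() < p_glitch else x
```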
== Discussion

Our attempt to expand the range of the hyperparameter search was admirable but overambitious. The time taken to perform such an expansive search was underestimated, and time pressure led to mistakes in the final optimisation run and an insufficient number of generations to gain any real insight into the optimisation power of genetic algorithms with this degree of optimisation freedom. Our mistakes did, however, lead us to an interesting discovery that could certainly warrant further investigation. There do not seem to be any existing investigations within the literature into such a method of using fake glitches to inoculate CBC detection models against structured background noise. The closest to such a method is perhaps the training procedure used to train the models used in the MLy pipeline @MLy. MLy is a coherence-detection pipeline, so it relies on its machine learning models being able to detect coherence between different detectors rather than specific signal morphologies. In order to train the model to distinguish between glitches and real signals, it is trained with counterexamples consisting both of coincident but incoherent glitches across multiple detectors and of single-detector glitches.

Without a deeper investigation, it is difficult to know whether these glitches were indeed the source of the improved performance. If this does turn out to be the case, it is very exciting. We could perhaps remove the degradation at high SNRs by only injecting glitches into noise examples; it is unclear whether this would maintain the impressive performance at low SNRs and FARs, but it seems reasonable to think that it might. If this investigation has unveiled anything, it is that a wide search of the parameter space of both models and datasets could reveal unpredicted and useful results.

== Deployment in MLy <deployment-in-mly>

Whilst this attempt to demonstrate genetic algorithms for optimizing CBC detection models has fallen short, they were used to generate models for the MLy pipeline, which consists of two models: one designed to detect coincidence (@mly_coincidence) and a second trained to detect coherence (@mly_cohernece). Since these were both relatively unknown problems compared to the CBC search, not much was known about the ideal structure of artificial neural networks for these tasks. Optimizing models by hand can be time-consuming and generates many opportunities to miss interesting areas of the parameter search space. A previous version of the Dragonn optimiser was used to develop the models that are today in use by MLy @MLy.

#figure(
  image("mly_coincidence_diagram.png", width: 75%),
  caption: [MLy Coincidence Model developed with Dragonn @MLy. ]
) <mly_coincidence>

#figure(
  image("mly_coherence_diagram.png", width: 100%),
  caption: [MLy Coherence Model developed with Dragonn @MLy. ]
) <mly_cohernece>
https://github.com/0x546974616e/typst-resume
https://raw.githubusercontent.com/0x546974616e/typst-resume/main/README.md
markdown
# Typst Resume A simple and modern resume template written in [Typst][]. [Typst]: https://github.com/typst/typst ## To build ```sh make make all make watch make all CONFIG=<path/to/file.yaml> LANGCC=<lang> make watch CONFIG=<path/to/file.yaml> LANGCC=<lang> ``` If no `CONFIG` is provided, [`example.yaml`](configs/example.yaml) is loaded by default. (Translations are still WIP)
https://github.com/mrtz-j/typst-thesis-template
https://raw.githubusercontent.com/mrtz-j/typst-thesis-template/main/template/chapters/basic-usage.typ
typst
MIT License
#import "global.typ": * #import "../utils/symbols.typ": * #import "../utils/todo.typ": * This chapter will go over the template structure and its basic usage. Users should note that the file structure discussed here is merely a recommended starting point and not required for using the template package. == Template structure <subsec:template_structure> As opposed to lightweight and uncomplicated report templates you may be familiar with if you have used typst or #LaTeX before, this template has a slightly more involved _file structure_. Instead of writing all content in one large `thesis.typ` file, each chapter is written into its own file and imported in `thesis.typ` instead. These chapters are placed in their own directory. @fig:file_structure shows a tree view of the default file structure of this template. In addition to the `chapters` directory, there is also one for figures. Here you can neatly store all your `.svg`, `.png` or `.jpg` files and reference them in the chapters. Alternatively, some students might prefer to organize further with a directory for each chapter for both typst content and figures, when their thesis grows in size. Another important file to note is `refs.bib`. This is where you put your #BibTeX entries that will produce your bibliography, just like you are used to when working with #LaTeX. #[ #figure(caption: [File structure tree view])[ #local(zebra-fill: none, number-format: none)[ ``` template ├── chapters │   ├── basic-usage.typ │   ├── figures.typ │   ├── global.typ │   ├── introduction.typ │   ├── typst-basics.typ │   └── utilities.typ ├── figures │   ├── dining_philosophers.png │   ├── philosophers.png │   ├── plot_serial.svg │   └── uit_aurora.jpg ├── refs.bib ├── thesis.pdf ├── thesis.typ └── utils ├── caption.typ ├── feedback.typ ├── form.typ ├── subfigure.typ ├── symbols.typ └── todo.typ ``` ] ] <fig:file_structure> ] == Getting Started <subsec:getting_started> In order to get started using this template document class for your thesis, you can start off with the template you are reading right now right from the typst webapp #footnote[see #link("https://typst.app")]. Very similar to Overleaf, it is an online editor which conveniently compiles and displays your document as you write, and allows for easy online access for your supervisor. You can also edit the document simultaneously with your co-author if you have one. The typst webapp lets you browse templates and will initialize this template for you when you select it. If you want to work with typst locally in your favorite text editor instead, make sure you have `typst` installed and run `typst init @preview/modern-uit-thesis:0.1.1 my-thesis` and the template will be initialized into `my-thesis`. Now you can compile the document using `typst compile`, or `typst watch` to automatically reload as you make changes to it. Starting in `thesis.typ`, we can see a function call to a function `thesis`. This is how the thesis template style is applied to the document. There are a number of parameters that can be sent into this invocation both to provide special content like title, abstract or list of abbreviations as well as additional customization details. The default arguments demonstrated in `thesis.typ` should give you an idea of the usage. We recommend you follow along in the typst code for each chapter as you read them in order to discover how you can leverage the useful features demonstrated there yourself.
https://github.com/Alignof/Juggernaut_typst
https://raw.githubusercontent.com/Alignof/Juggernaut_typst/master/README.md
markdown
MIT License
# Juggernaut_typst Document for Juggernaut in typst.
https://github.com/Owl-Boy/Model-Repair-Of-Time-Aware-Models-Report
https://raw.githubusercontent.com/Owl-Boy/Model-Repair-Of-Time-Aware-Models-Report/main/greedy.typ
typst
#import "lapreprint.typ": template #import "@preview/lovelace:0.2.0": * #import "@preview/showybox:2.0.1": showybox #import "@preview/algo:0.3.3": algo, i, d, comment, code #import "@preview/colorful-boxes:1.2.0": * #import "@preview/fletcher:0.4.5" as fletcher: diagram, node, edge #show: setup-lovelace // #show figure.caption: it => [ // #underline(it.body) | // #it.supplement // #context it.counter.display(it.numbering) // ] #let colred(x) = text(fill: red, $#x$) #let colgray(x) = text(fill: gray, $#x$) #let colgreen(x) = text(fill: green.lighten(20%), $#x$) // Problem Callout Block #let problem(title, content) = figure( showybox( frame: ( border-color: red.darken(50%), title-color: red.lighten(60%), body-color: red.lighten(80%) ), title-style: ( color: black, weight: "regular", align: center ), title: title, content ), supplement: [Problem], kind: "Problem", ) // Theorem / Lemma Callout Block #let theorem(content) = figure( showybox( frame: ( border-color: purple.darken(40%), body-color: purple.lighten(95%) ), content ), supplement: [Theorem], kind: "Theorem", ) // Definition Callout Block #let definition(content) = figure( showybox( frame: ( border-color: green.darken(40%), body-color: green.lighten(95%) ), content ), supplement: [Definition], kind: "Definition", ) // Examples Callout Block #let example(content) = figure( showybox( frame: ( border-color: blue.darken(40%), body-color: blue.lighten(95%) ), content ), supplement: [Example], kind: "Example" ) #show: template.with( title: "Model Repair and Conformance Checking of Time-Aware Models", theme: red.darken(50%), authors: ( ( name: "<NAME>", affiliations: "1, 2", ), ( name: "<NAME>", affiliations: "2", orchid : "0000-0002-1470-5074", ), ), affiliations: ( (id: "1", name: "Chennai Mathematical Institute"), (id: "2", name: "LSV, ENS Paris-Saclay"), ), abstract: ( ( title: "Abstract", content: "The subject of this paper is to implement model repair for timed models, that is, process models that consider both the sequence of events in a process as well as the timestamps at which each event is recorded. Time aware process mining is a growing subfield of researchh, and as tools that seek to discover timing related properties in the process develop, so does the need for conformance checking techniques that give insightful quality measure, for the purpose of this paper, we use alignments as witness for the model being unfit, we then give algorithms improving the models"), ), keywords: ("Time Petri Nets", "Model Repair", "Conformance Checking"), kind: "Research Internship", bibliography-file : "ref.bib" ) = Introduction == Process Mining and Model Repair *Process Mining* is a family of techniques used to analyze event data in order to understand and improve the system. It studies the system through their event logs and seeks to extract meaningful ways to model the underlying process to understand the system better or to predict it's future behaviour @Process-Mining. It is a multi-step process that happens as follows: *Discovery:* Discovery in process mining is the process of constructing a model as a an attempt to describe the working of a given system. This is a well studied domain and it is common to see tools like Machine Learning used here. But the unexplainability of the approaches that processes like ML take naturally makes one questions if the produced model approximates the target system well enough. \ This is where *Conformance Checking* comes in. 
Conformance Checking is a set of techniques used to compare a _process model_ with an event log and rate it over some parameters like:
- Fitness: Does the model exhibit the behaviours specified in the logs?
- Precision: Does the model deviate a lot from the behaviour specified in the logs?
- Generalization: Does the model correctly predict behaviour of the system outside the given logs?
- Simplicity: Is the model the simplest model that describes the log accurately?

Once we measure how well the model conforms to the given set of logs, we move to the next step, *Enhancement*, where an existing process model is extended or improved using information about the actual process recorded in some event log.

#set page(margin: auto)

Another important part of Process Mining is *Performance Analysis*, where the goal is to analyze and improve the execution of the model to use less time and resources and improve its performance data.

_Model Repair_ is a special case of *Enhancement* that deals with improving the model to more accurately fit any discrepancies due to events in the system that happen after the model has been constructed. The improvement metrics are usually one of the four mentioned above; this paper focuses on the fitness of the model.

== Time Aware Models

Process Models are represented by formal objects. Petri nets offer a graphical means to represent concurrent systems and a formal semantics for their execution. The setup is similar to the one in @Timed-Alignments, where an event is represented by a letter from a finite alphabet (a set of possible discrete events). A log is represented by a set of timed words over the alphabet, each of which is a list of events along with the timestamps at which they occurred. The notion of distance between words, which will give our conformance metric, is similar to Levenshtein's edit distance, where we find the quickest way to go from one timed word to another using a given set of edit actions.

We will be using Time Petri Nets, which are a variant of Petri nets that can check the duration it takes to fire a transition once it is enabled, restricting the set of timed words they accept; this can be used to construct relationships and constraints between events and the timestamps at which they can be taken, as seen in the logs.

= Preliminaries

== Time Petri Nets

We represent an event as a pair $(a, t)$ where $a in Sigma$ is the action and $t$ denotes the time at which the action was taken.

// #losing_definition(1, [ A _timed trace_ is a sequence $gamma in (Sigma times RR^+)^*$ of timed events, seen as a timed word. ])

#definition([
  #text(weight: "bold", "Definition 1:") A _timed trace_ is a sequence $gamma in (Sigma times RR^+)^*$ of timed events, seen as a timed word.
]) <def1>

We will often ignore the untimed part of the word, i.e. project it onto the time component, leaving a word in $(RR^+)^*$. The process model we use here is a Time Petri Net.

#definition([
  *Definition 2:* A _Labelled Time Petri Net_ (or $"TPN"$) is a tuple $cal(N) = angle.l P, T, F, "SI", Sigma, lambda, M_0, M_f angle.r$ where
  - $P$ and $T$ are disjoint sets of places and transitions respectively.
  - $F subset.eq (P times T) union (T times P)$ is the flow relation.
  - $"SI" : T -> II$ is the static interval function, $"SI"(t) = ("st"(t), "en"(t))$ where
    - $"st"(t)$ is the smallest valid delay from the enabling of $t$ to its firing.
    - $"en"(t)$ is the largest valid delay from the enabling of $t$ to its firing.
- $lambda : T -> Sigma$ is a labelling function for the transition with actions from the action set $Sigma$ - $M_0$ and $M_f : P -> NN$ are the initial and final markings. ]) <def2> Given a transition $t in T$ we define - The Pre-set of $t$ from as $""^circle.filled.small t = {p in P | (p, t) in F}$ - The Post-set as $t^circle.filled.small = { p in P | (t, p) in F }$ (we define pre-set and post-set if places similarly). - We say that a transition $t$ is enabled at a marking $M$ if $forall p in ""^circle.filled.small t, M(p) > 0$ - The set of all enabled transitions in $M$ is given by $"Enabled"(M) = {t in T | t "is enabled in " M}$. #definition([ *Definition 3:* The _state_ (or _configuration_) of a $"TPN" cal(N) = angle.l P, T, F, "SI", Sigma, lambda, M_0, M_f angle.r$ is a pair $S = (M, I)$ where $M$ is a marking and $I : "Enabled"(M) -> RR^+$ is the clock function keeping track of the time since each transition was enabled. We set the inital state to be $(M_0, bold(0))$ where $bold(0)$ is the zero-function. ]) <def3> A transition $t$ is said to be *fireable* after a delay $theta$ from a state $S=(M, I)$ if $t$ is enabled in $M$ and $I(t) + theta in "SI"(t)$ The update to the marking and time function are defined below: #definition([ *Definition 4:* (_Firing Rule_) When a transition $t$ fires after a delay $theta$ from state $S = (M, I)$, the new state $S' = (M', I')$ is given as follows: #math.equation(block: true, numbering: none)[ $M' = (M without ""^circle.filled.small t) union t^circle.filled.small\ I'(t) = cases( I(t) + theta quad quad & "If" t in "Enabled"(M'), 0 & "If" t in "Enabled"(M') "and" t in.not "Enabled"(M), "Undefined" & "Otherwise",)$] This is also denoted as $S [t angle.r S'$ ]) <def4> A valid execution of the model starts at the initial marking $M_0$, fires a sequence of transitions and ends at the final marking $M_f$. 
#example([
  *Example 1:* Consider the following example of a Time Petri Net $N$: \
  #figure(
    diagram(
      spacing: (25pt, 20pt),
      node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <p1>),
      node((2,-1), stroke: 0.5pt, radius: 2mm, name: <p2>),
      node((2,1), stroke: 0.5pt, radius: 2mm, name: <p3>),
      node((4,-1), stroke: 0.5pt, radius: 2mm, name: <p4>),
      node((4,1), stroke: 0.5pt, radius: 2mm, name: <p5>),
      node((6,0), stroke: 0.5pt, radius: 2mm, name: <p6>),
      node((1, 0), $a$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t1>),
      node((3, -1), $b$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t2>),
      node((3, 0), $c$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3>),
      node((1, 1), $d$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t4>),
      node((3, 1), $e$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t5>),
      node((5, 0), $f$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t6>),
      edge(<p1>, <t1>, "-|>"),
      edge(<p2>, <t2>, "-|>"),
      edge(<p2>, <t3>, "-|>", bend: -20deg),
      edge(<p3>, <t4>, "-|>"),
      edge(<p3>, <t5>, "-|>"),
      edge(<p4>, <t6>, "-|>", bend: 20deg),
      edge(<p5>, <t6>, "-|>", bend: -20deg),
      edge(<t1>, <p2>, "-|>", bend: 20deg),
      edge(<t1>, <p3>, "-|>", bend: -20deg),
      edge(<t2>, <p4>, "-|>"),
      edge(<t3>, <p4>, "-|>", bend: -20deg),
      edge(<t4>, <p3>, "-|>", bend: -40deg),
      edge(<t5>, <p5>, "-|>"),
      edge(<t6>, <p6>, "-|>"),
      node((0.8, -0.4), text(size: 7pt, $[0, oo)$)),
      node((3, -1.4), text(size: 7pt, $[1, 1]$)),
      node((3, 0.35), text(size: 7pt, $[0, 2]$)),
      node((0.65, 1), text(size: 7pt, $[1, 3]$)),
      node((3, 1.4), text(size: 7pt, $[1, 4]$)),
      node((5.3, -0.4), text(size: 7pt, $[0, 3]$)),
    ))
  \
  One possible execution of $N$ would be for the firing sequence
  #math.equation(block: true, numbering: none)[
    $w = (a, 1)(b, 2)(d, 3)(e, 4)(f, 5)$
  ]
  The initial marking only has $a$ enabled, and firing $a$ moves the token to places that enable $b, c, d$ and $e$. Then transition $b$ is fired at time $2$, which puts a token in one of the places consumed by transition $f$. At time $3$, $d$ is fired, followed by $e$ at $4$; now $f$ is enabled and is fired after a second of waiting.
]) <ex1>

Now we can define the language of the Time Petri Net as follows.

#definition([
  *Definition 5:* A word $w = a_0 a_1 ... a_n in Sigma^*$ is in the _language of the Labelled Time Petri Net_ $cal(L(N))$ if there is a fireable sequence of transitions $(t_0, t_1 ... t_n)$ such that $lambda(t_0, t_1 ... t_n) = w$ and, if the sequence of transitions is taken from the initial state $M_0$, it will end at the final marking $M_f$.
  #math.equation(block: true, numbering: none)[
    $(M_0, bold(0)) [t_0 t_1 ... t_n angle.r (M_f, I)$
  ]
]) <def5>

== Helper Definitions

To help with defining some of the things that will be used further, we will use *causal nets*, which are like unfoldings of a Petri net and make definitions and procedures about walking through the Petri net easier.

#definition([
  *Definition 6:* A _Causal Net_ $"CN":= angle.l B, E, G angle.r$ is a finitary, acyclic net where
  #math.equation(block: true, numbering: none)[
    $forall b in B: |b^circle.filled.small | <= 1 and |""^circle.filled.small b| <= 1$
  ]
]) <def6>

This definition reads as "A Petri net where each place has at most 1 in-transition and at most 1 out-transition". We can also think of this as taking the original Petri net and, every time we see a place with multiple out-transitions, copying the place and the net constructed so far once for each transition; we do the same for in-transitions.
Once we construct a Causal Net for a Petri Net we need to connect the execution of the Causal Net with that of the Petri Net. This will be done using a homomorphism.

#definition([
  *Definition 7:* A mapping $p: B union E -> P union T$ is a _homomorphism_ if:
  - $forall e in E, p(e^circle.filled.small) = p(e)^circle.filled.small$
  - $forall e in E, p(""^circle.filled.small e) = ""^circle.filled.small p(e)$
  - $p(M_(0("causal net"))) = M_(0("Petri net"))$
]) <def7>

We will use a Causal Net and a homomorphism together as

#definition([
  *Definition 8:* A _Causal Process_ of a Time Petri Net $cal(N)$ is a pair $("CN", p)$ where $"CN"$ is a causal net and $p$ is a homomorphism from $"CN"$ to $cal(N)$.
]) <def8>

Using $p$, the elements of $"CN"$ are identified with their corresponding elements in $cal(N)$. As a result, any run in the Causal Process corresponds uniquely to an untimed run in the Time Petri Net. To also associate timestamps with our Causal Process we define

#definition([
  *Definition 9:* A _Timing Function_ $tau: E -> RR^+$ is a function from events of a causal process into time values.
]) <def9>

#pagebreak()

Another useful way to capture the relation between a trace and a causal net is to look at the amount of time a transition has to wait before it is triggered; this is defined using the flow function

#definition([
  *Definition 10:* Given a causal process $("CN", p)$ and a timing function $tau: E -> RR^+$, one can define a flow function $f_tau: E -> RR^+$ as :
  #math.equation(block: true, numbering: none)[$
    f_tau (e) = cases(
      tau(e) & ""^circle.filled.small ""^circle.filled.small e = emptyset,
      tau(e) - tau(e') quad quad quad & e' in ""^circle.filled.small ""^circle.filled.small e\, tau(e') = max_(e'' in ""^circle.filled.small ""^circle.filled.small e) { tau(e'') }
    )
  $]
])

== Conformance Metric

Conformance Checking tries to measure how well a process model mimics the system; some of the metrics used for that are defined below.

#definition([
  *Definition 11:* Given a process model $cal(N)$ and a log $L$ we define the _fitness_ of $cal(N)$ with respect to $L$ as
  #math.equation(block:true, numbering : none)[
    $"fitness"(cal(N), L) = 1 - max_(sigma in L) "dist"^*(sigma, cal(L(N)))$
  ]
]) <def10>

Here $"dist"^*$ is some normalized distance between traces; some options are defined later. The fitness of the model is high if all of the observed behaviours in the logs are closely captured by the model.

#definition([
  *Definition 12:* Given a process model $cal(N)$ and a log $L$ we define the _precision_ of $cal(N)$ with respect to $L$ as
  #math.equation(block:true, numbering : none)[
    $"precision"(cal(N), L) = 1 - max_(w in cal(L(N))) "dist"^*(L, w)$
  ]
]) <def11>

We have that the precision of the model is high if it does not exhibit behaviour that deviates too much from the observed logs.

= Conformance Checking and Model Repair in Timed Setting

The problem of Model Repair is: given an event log, a process model, and some budget, compute the edits that can be made to the model under the budget to improve the conformance of the model to the system by some metric. If we let a Time Petri Net be our process model and fitness be our conformance metric, then the problem can be stated as:

#problem("Model Repair of Time Petri Net (General)")[Given a process $cal(N)$ denoted by a Time Petri Net, a log $L$ and a budget $beta$, we wish to find an edit $cal(N) -> cal(N')$ that can be implemented under the given budget constraint and optimally increases the fitness.]
<prob1> The two ways in which the model can be imperfect fitness is to have traces in the log such that - $"Untime"(L) subset.not.eq "Untime"(cal(L)(cal(N)))$, i.e there are traces where the sequence of events is not captured by $cal(N)$ - There exists a trace whose untimed version is in the langauge, but the timestamps do not match with any word in the language of $cal(N)$ #example[ *Example 2:* Consider the Process Model in @ex1 and consider the the following observed traces. - $sigma_1 = (a, 0)(a, 1)(b, 2)(d, 3),(e, 3)(f, 5)$ - Clearly, there is no trace in the process model that has more than 1 $a$, which means the structure of the model itself needs to be updated by adding/removing states and transitions. - $sigma_2 = (a, 1)(b, 1)(d, 3)(e, 4)(f, 5)$ - The sequence of transitions in $sigma_2$ can happen in the model but for transition $b$ we need to wait for at least $1$ unit. Changing the timestamp for that transition to $2$ gives a trace that has a run in the petri net. - $sigma_3 = (a, 1)(d, 1)(d, 2)(e, 4)(f, 5)$ - This trace is also not possbile in the model as transition $b$ or $c$ must be fired to enable transition $f$. This can be fixed by relabelling transition $b$ or $c$. ] <ex2> In the untimed setting, this problems is veiwed as minimzing cost over a series of edit moves which are either insertions or deletions to the model. For the timed case there are two aspects that need to be improved, which are mentioned above. This problem has been studied for the untimed case, but the timed settings is more complex. Also, in practice, a large set of malfuctionings can be modeled as temporal anomalies (a slowing down of a conveyor belt speed due to wear, a shorter duration of a work phase due to to an incorrect handling of the operator, a causal change in a timer duration, etc.) and the problem is a pre-requisite for the general case of dealing with all kinds of errors. In this paper we will be focusing on the purely timed version of the model repair problem. i.e where the only anomalies that are fixed are temporal ones (All traces that are not in the language of the model will have an issue similar to $sigma_2$ in @ex2) #problem("Model Repair of Time Petri Nets (Purely Timed)")[Given a process $cal(N)$ denoted by a Time Petri Net, a log $L$ and a budget $beta$, we wish to find an edit of the $cal(N) -> cal(N')$ that can be implemented under the given budget constraint and optimally increases the fitness. We also have the constraint that $forall sigma in L, "Untime"(sigma)$ gives a valid causal process for $cal(N)$.] <prob2> To properly formalize the problem we need definitions for editing out petri net and conformance for which we need to define out distance functions. == Edits and Distances Our notion of distance is similar to that of Levenstein's edit distance where we are given a set of edit actions and we try to go from one trace to another in the shortest way, representing in some sense how different 2 traces are, there are 2 options that are considered usually #definition([ *Definition 13:* (_Stamp Edit_) Given a timing function $gamma : E -> RR^+$, we define the a stamp move as: #math.equation(block: true, numbering: none)[ $ forall x in RR, e in E : "stamp"(x, e)(gamma) = gamma' "where"\ forall e' in E : gamma'(e) = cases( gamma (e') + x quad & e' = e, gamma (e')& "otherwise" ) $ ] ]) <def12> i.e we edit the timestamp at which a particular transition $e$ was taken by $x$. 
These edits only affect a single event, and can represent a reading error which needs to be corrected without affecting the other timestamps. Another natural edit move to consider is:

#definition([
  *Definition 14:* (_Delay Edit_) Given a flow function $eta : E -> RR^+$, we define a delay move as:
  #math.equation(block: true, numbering: none)[
    $ forall x in RR, e in E : "delay"(x, e)(eta) = eta' "where"\
    forall e' in E : eta'(e') = cases(
      eta (e') + x quad & e' = e,
      eta (e')& "otherwise"
    ) $
  ]
])

Intuitively, this edit represents a change in the duration one waits before taking a transition; this is why the timestamps of all subsequent transitions are also changed by the same amount.

Using these two edit moves, we can define our notions of distance. We assign a cost to each edit move; for both delay and stamp edits, say, the cost of an edit is the magnitude of the change $x$. Using that, we can define the following three distances:

#figure(
  showybox(
    frame: (
      border-color: green.darken(40%),
      body-color: green.lighten(95%)
    ),
    [
      *Definition 15:* (_Stamp Only Distance $d_t$_) Given any two timing functions (or flow functions) $tau_1, tau_2$ over the same causal process $("CN", p)$, we define the stamp distance as
      #math.equation(block : true, numbering: none)[$
        d_t (tau_1, tau_2) = min {"cost"(m) | m in "Stamp"^*, m(tau_1) = tau_2}
      $]
    ],
    [
      *Definition 16:* (_Delay Only Distance $d_theta$_) Given any two timing functions (or flow functions) $tau_1, tau_2$ over the same causal process $("CN", p)$, we define the delay distance as
      #math.equation(block : true, numbering: none)[$
        d_theta (tau_1, tau_2) = min {"cost"(m) | m in "Delay"^*, m(tau_1) = tau_2}
      $]
    ],
    [
      *Definition 17:* (_Mixed Moves Distance $d_N$_) Given any two timing functions (or flow functions) $tau_1, tau_2$ over the same causal process $("CN", p)$, we define the mixed distance as
      #math.equation(block : true, numbering: none)[$
        d_N (tau_1, tau_2) = min {"cost"(m) | m in ("Stamp" union "Delay")^*, m(tau_1) = tau_2}
      $]
    ],
  ),
  kind: "Definition",
  supplement: [Definition]
) <def14-16>

== Alignments

In @Alignments, the notion of alignment was defined as the minimal series of corrections needed to transform a log trace to match a trace closest to it in the language of the process model. This idea was extended further in @Timed-Alignments: timed alignments are the members of the language of a process model that differ from a timed trace only in the timestamps, and can be modified to fit the trace with the least number of steps.

Given a log trace and a model, we can find an alignment, which is the trace in the model that can be converted to the given log trace with the fewest edits. Hence, the idea behind all of the following sections is to find an alignment for each log trace and try to edit it to match the trace; whenever the edit takes the alignment out of the language of the model, we extend the model to accommodate it.

#example[
  *Example 3:* The following is a visual representation of the above idea.
  Consider the following Time Petri Net $N$
  #figure(
    diagram(
      spacing: (25pt, 20pt),
      node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <p1>),
      node((2,0), stroke: 0.5pt, radius: 2mm, name: <p2>),
      node((4,0), stroke: 0.5pt, radius: 2mm, name: <p3>),
      node((6,0), stroke: 0.5pt, radius: 2mm, name: <p4>),
      node((1, 0), $a$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t1>),
      node((3, 0), $b$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t2>),
      node((5, 0), $c$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3>),
      edge(<p1>, <t1>, "-|>"),
      edge(<t1>, <p2>, "-|>"),
      edge(<p2>, <t2>, "-|>"),
      edge(<t2>, <p3>, "-|>"),
      edge(<p3>, <t3>, "-|>"),
      edge(<t3>, <p4>, "-|>"),
      node((1, -0.4), text(size: 7pt, $[1, 2]$)),
      node((3, -0.4), text(size: 7pt, $[1, 3]$)),
      node((5, -0.4), text(size: 7pt, $[0, 2]$)),
      node((0, -0.4), text(size: 7pt, "i")),
      node((2, -0.4), text(size: 7pt, "ii")),
      node((4, -0.4), text(size: 7pt, "iii")),
      node((6, -0.4), text(size: 7pt, "iv")),
    ))

  Along with it we are given the flow function $f = (a, 0) (b, 5) (c, 1)$; the notion of distance used is the *Delay-Only* distance. This word does not belong to the language of the model, and its alignment would be the flow function $a_f = (a, 1) (b, 3) (c, 1)$; both of these can be seen in the following diagram

  #figure(
    diagram(
      spacing: (25pt, -8pt),
      node((0, 0), $5$),
      node((0, 1), $4$),
      node((0, 2), $3$),
      node((0, 3), $2$),
      node((0, 4), $1$),
      node((0, 5), $0$),
      node((0.2, 0), $-$, name: <top>, outset: -10.2pt),
      node((0.2, 1), $-$),
      node((0.2, 2), $-$),
      node((0.2, 3), $-$),
      node((0.2, 4), $-$),
      node((0.2, 5), $-$, outset: -10pt),
      node((0.2, 6), name: <btm>, outset: -0.5pt),
      edge(<top>, <btm>, "-"),
      node((1.4, 0), $colgray(-)$, name: <top2>, outset: -10.2pt),
      node((1.4, 1), $colgray(-)$),
      node((1.4, 2), $colgray(-)$),
      node((1.4, 3), $colgray(-)$),
      node((1.4, 4), $colgray(-)$),
      node((1.4, 5), $colgray(-)$, outset: -0.8pt),
      node((1.4, 6), name: <btm2>, outset: -0.5pt),
      node((1.4, 3), name: <topg>, outset: 1.5pt),
      node((1.4, 3.52), $bracket.b$),
      node((1.4, 3.44), $bracket.t$),
      edge(<top2>, <btm2>, "-", stroke: gray),
      edge(<topg>, (1.4, 5), "-"),
      node((4.2, 0), $colgray(-)$, name: <top3>, outset: -10.2pt),
      node((4.2, 1), $colgray(-)$),
      node((4.2, 2), $colgray(-)$),
      node((4.2, 3), $colgray(-)$),
      node((4.2, 4), $colgray(-)$),
      node((4.2, 5), $colgray(-)$, outset: -10pt),
      node((4.2, 6), name: <btm3>, outset: -0.5pt),
      node((4.2, 3.52), $bracket.b$),
      node((4.2, 2.44), $bracket.t$),
      edge(<top3>, <btm3>, "-", stroke: gray),
      node((4.2, 2), name: <topg2>, outset: 1.5pt),
      node((4.2, 5), name: <botg2>, outset: 10pt),
      edge(<topg2>, <botg2>, "-"),
      node((7.1, 0), $colgray(-)$, name: <top4>, outset: -10.2pt),
      node((7.1, 1), $colgray(-)$),
      node((7.1, 2), $colgray(-)$),
      node((7.1, 3), $colgray(-)$),
      node((7.1, 4), $colgray(-)$),
      node((7.1, 5), $colgray(-)$, outset: -10pt),
      node((7.1, 6), name: <btm4>, outset: -0.5pt),
      node((7.1, 4.52), $bracket.b$),
      node((7.1, 3.44), $bracket.t$),
      edge(<top4>, <btm4>, "-", stroke: gray),
      node((7.1, 3), name: <topg3>, outset: 1.5pt),
      node((7.1, 6), name: <botg3>, outset: 0.5pt),
      edge(<topg3>, <botg3>, "-"),
      node((9, 0), ""),
      node((1.4, 5), $compose$),
      node((4.2, 0), $compose$),
      node((7.1, 4), $compose$),
      node((1.4, 4), $colred(circle.tiny.filled)$),
      node((4.2, 2), $colred(circle.tiny.filled)$),
      node((7.1, 4), $colred(circle.tiny.filled)$),
      node((1.7, 4), name: <mt1>, outset: 1.5pt),
      node((1.7, 6), name: <mb1>, outset: 0.5pt),
      edge(<mt1>, <mb1>, "->", stroke: gray.darken(30%)),
      node((4.5, 0), name:
<mt2>, outset: 1.5pt), node((4.5, 3), name: <mb2>, outset: 10pt), edge(<mb2>, <mt2>, "->", stroke: gray.darken(30%)), )) Here $compose$ is used to donate the flow function of the $f$, while $colred(circle.tiny.filled)$ is used to denote the flow function of $a_f$ and the intervals represent the static intervals of the 3 transitions. We improve the fitness of the model by getting the $a_f$ closer to $f$ by changing the intervals of $N$ to get the following Time Petri Net $N'$: #figure( diagram( spacing: (25pt, 20pt), node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <p1>), node((2,0), stroke: 0.5pt, radius: 2mm, name: <p2>), node((4,0), stroke: 0.5pt, radius: 2mm, name: <p3>), node((6,0), stroke: 0.5pt, radius: 2mm, name: <p4>), node((1, 0), $a$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t1>), node((3, 0), $b$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t2>), node((5, 0), $c$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3>), edge(<p1>, <t1>, "-|>"), edge(<t1>, <p2>, "-|>"), edge(<p2>, <t2>, "-|>"), edge(<t2>, <p3>, "-|>"), edge(<p3>, <t3>, "-|>"), edge(<t3>, <p4>, "-|>"), node((1, -0.65), text(size: 7pt, $colgray([1, 2])$)), node((1, -0.4), text(size: 7pt, $[colred(0), 2]$)), node((3, -0.65), text(size: 7pt, $colgray([1, 3])$)), node((3, -0.4), text(size: 7pt, $[1, colred(5)]$)), node((5, -0.65), text(size: 7pt, $colgray([0, 2])$)), node((5, -0.4), text(size: 7pt, $[0, 2]$)), node((0, -0.4), text(size: 7pt, "i")), node((2, -0.4), text(size: 7pt, "ii")), node((4, -0.4), text(size: 7pt, "iii")), node((6, -0.4), text(size: 7pt, "iv")), )) ] <ex3> = Results == Extended Free Choice Time Petri Nets with Delay-Only Distance <restrictions> During the process of editing the model, the alignment will keep changing, to make sure that we can freely change the flow function at a point, without having to worry about the fireability of other transitions that not causally linked to the current one we focus out attention to *Extended Free Choice Petri Nets* #definition[ *Definition 19:* A Time Petri Net is _Extended Free Choice_ iff, for all two transitions $t$ and $t'$ we have that $""^circle.small.filled t sect ""^circle.small.filled t' != emptyset => ""^circle.small.filled t = ""^circle.small.filled t'$ ] The problem dicussed here would be #link(<prob2>)[The Purely Timed Model Repair Problem], so the untimed log is a subset of the untimed language of the petri net. For the edits to the mode, we cost $x$ unit of the budget whenever any bound of a time range of a transition is changed by $x$. The conformance metric used is _fitness_ but here we define it as $- (max_(sigma in L) "dist"_theta (sigma, cal(L(N)))$ which can be easily converted to the normalized distance used in the original definition. 
#example[
  *Example 4:* We start with a simple example and informally go over the procedure.

  Consider the following Time Petri Net $cal(N)$
  #figure(
    diagram(
      spacing: (25pt, 20pt),
      node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <p1>),
      node((2,0), stroke: 0.5pt, radius: 2mm, name: <p2>),
      node((4,-1), stroke: 0.5pt, radius: 2mm, name: <p3a>),
      node((6,-1), stroke: 0.5pt, radius: 2mm, name: <p4a>),
      node((4,1), stroke: 0.5pt, radius: 2mm, name: <p3b>),
      node((6,1), stroke: 0.5pt, radius: 2mm, name: <p4b>),
      node((1, 0), $a$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t1>),
      node((3, 0), $b$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t2>),
      node((5, -1), $c$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3a>),
      node((5, 1), $d$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3b>),
      edge(<p1>, <t1>, "-|>"),
      edge(<t1>, <p2>, "-|>"),
      edge(<p2>, <t2>, "-|>"),
      edge(<t2>, <p3a>, "-|>"),
      edge(<p3a>, <t3a>, "-|>"),
      edge(<t3a>, <p4a>, "-|>"),
      edge(<t2>, <p3b>, "-|>"),
      edge(<p3b>, <t3b>, "-|>"),
      edge(<t3b>, <p4b>, "-|>"),
      node((1, -0.4), text(size: 7pt, $[1, 2]$)),
      node((3, -0.4), text(size: 7pt, $[1, 3]$)),
      node((5, -1.4), text(size: 7pt, $[0, 2]$)),
      node((5, 0.6), text(size: 7pt, $[0, 2]$)),
      node((0, -0.4), text(size: 7pt, "i")),
      node((2, -0.4), text(size: 7pt, "ii")),
      node((4, -1.4), text(size: 7pt, "iii")),
      node((4, 0.6), text(size: 7pt, "iv")),
      node((6, -1.4), text(size: 7pt, "v")),
      node((6, 0.6), text(size: 7pt, "vi")),
    ))

  We are also given the following log
  #math.equation(block:true, numbering:none)[$
    L = mat(delim:"{",
    "[" (a, 1), (b, 5), (c, 9)"]," ;
    "[" (a, 0), (b, 5), (d, 7)"]" ;
    )
  $]
  And we are given the budget $beta = 2$.

  Our goal is to edit the model by making a change of at most $beta = 2$ to the boundaries of the transitions in order to minimize the distance of the logs from the model. The procedure goes as follows:
  - The transitions only keep track of the delay used to trigger them, so it is easier to represent each trace by the wait time since the previous transition rather than by absolute timestamps; we therefore construct the following flow functions
  #math.equation(block:true, numbering:none)[$
    F = mat(delim:"{",
    "[" (a, 1), (b, 4), (c, 4)"]," ;
    "[" (a, 0), (b, 5), (d, 2)"]" ;
    )
  $]
  And now it is easier to see that neither of the traces has a corresponding run in $N$; also, the distance of each trace is now easy to compute: for both traces it is $3$. So the fitness is $-3$, and we need to reduce the distance of the model from each of the traces to improve the fitness of $N$.

  Trace 1 takes transitions $b$ and $c$ too late, whereas trace 2 takes transition $b$ late and takes transition $a$ too early. This means that increasing the upper bound of transition $b$ reduces the distance of both traces, and hence is optimal, so we spend our budget on $b$.
  After spending 1 unit of budget, though, the model changes to
  #figure(
    diagram(
      spacing: (25pt, 20pt),
      node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <p1>),
      node((2,0), stroke: 0.5pt, radius: 2mm, name: <p2>),
      node((4,-1), stroke: 0.5pt, radius: 2mm, name: <p3a>),
      node((6,-1), stroke: 0.5pt, radius: 2mm, name: <p4a>),
      node((4,1), stroke: 0.5pt, radius: 2mm, name: <p3b>),
      node((6,1), stroke: 0.5pt, radius: 2mm, name: <p4b>),
      node((1, 0), $a$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t1>),
      node((3, 0), $b$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t2>),
      node((5, -1), $c$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3a>),
      node((5, 1), $d$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3b>),
      edge(<p1>, <t1>, "-|>"),
      edge(<t1>, <p2>, "-|>"),
      edge(<p2>, <t2>, "-|>"),
      edge(<t2>, <p3a>, "-|>"),
      edge(<p3a>, <t3a>, "-|>"),
      edge(<t3a>, <p4a>, "-|>"),
      edge(<t2>, <p3b>, "-|>"),
      edge(<p3b>, <t3b>, "-|>"),
      edge(<t3b>, <p4b>, "-|>"),
      node((1, -0.4), text(size: 7pt, $[1, 2]$)),
      node((3, -0.4), text(size: 7pt, $[1, colred(4)]$)),
      node((3, -0.65), text(size: 7pt, $colgray([1, 3])$)),
      node((5, -1.4), text(size: 7pt, $[0, 2]$)),
      node((5, 0.6), text(size: 7pt, $[0, 2]$)),
      node((0, -0.4), text(size: 7pt, "i")),
      node((2, -0.4), text(size: 7pt, "ii")),
      node((4, -1.4), text(size: 7pt, "iii")),
      node((4, 0.6), text(size: 7pt, "iv")),
      node((6, -1.4), text(size: 7pt, "v")),
      node((6, 0.6), text(size: 7pt, "vi")),
    ))

  And now trace 1 takes transition $b$ at a valid time. If we continue to spend our budget entirely on $b$, then we will not reduce the distance of trace 1 from the model any further, and hence will not change the fitness of the model.
] <ex4>

#example[
  So we need to rethink our distribution of the budget. We need to reduce the distance of both traces from the model; hence we need to spend the budget in a way that improves fitness. One way to do this optimally is to split our leftover budget evenly between transitions $b$ and $c$, finally ending with the following model.
#figure(
diagram(
	spacing: (25pt, 20pt),
	node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <p1>),
	node((2,0), stroke: 0.5pt, radius: 2mm, name: <p2>),
	node((4,-1), stroke: 0.5pt, radius: 2mm, name: <p3a>),
	node((6,-1), stroke: 0.5pt, radius: 2mm, name: <p4a>),
	node((4,1), stroke: 0.5pt, radius: 2mm, name: <p3b>),
	node((6,1), stroke: 0.5pt, radius: 2mm, name: <p4b>),
	node((1, 0), $a$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t1>),
	node((3, 0), $b$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t2>),
	node((5, -1), $c$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3a>),
	node((5, 1), $d$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3b>),
	edge(<p1>, <t1>, "-|>"),
	edge(<t1>, <p2>, "-|>"),
	edge(<p2>, <t2>, "-|>"),
	edge(<t2>, <p3a>, "-|>"),
	edge(<p3a>, <t3a>, "-|>"),
	edge(<t3a>, <p4a>, "-|>"),
	edge(<t2>, <p3b>, "-|>"),
	edge(<p3b>, <t3b>, "-|>"),
	edge(<t3b>, <p4b>, "-|>"),
	node((1, -0.4), text(size: 7pt, $[1, 2]$)),
	node((3, -0.4), text(size: 7pt, $[1, colred(4.5)]$)),
	node((3, -0.65), text(size: 7pt, $colgray([1, 4])$)),
	node((5, -1.4), text(size: 7pt, $[0, colred(2.5)]$)),
	node((5, -1.65), text(size: 7pt, $colgray([0, 2])$)),
	node((5, 0.6), text(size: 7pt, $[0, 2]$)),
	node((0, -0.4), text(size: 7pt, "i")),
	node((2, -0.4), text(size: 7pt, "ii")),
	node((4, -1.4), text(size: 7pt, "iii")),
	node((4, 0.6), text(size: 7pt, "iv")),
	node((6, -1.4), text(size: 7pt, "v")),
	node((6, 0.6), text(size: 7pt, "vi")),
))

Now we can stop, as we have consumed the entire budget, and the distance of each trace from the model is $1.5$.

Note that this is not the only optimal solution: reducing the lower bound of transition $a$ instead of increasing the upper bound of $b$ in the second step leads to the same improvement in fitness.
]

// #line()

// We first focus out attention to a restricted version of problem where:
// - The model in question will be a *Sequential Time Petri Net*, which means that, there is a dedicated start state and a dedicated end state, and each transition, connects one state, to one other state in a way that the underlying graph looks like a line graph, which the start and end states acting as the two ends of the graph.
// - The problem is restricited to a *Purely Timed Problem*, which means that the sequence of transitions represent the sequence of events in the system correctly, but the timestamps might not be accurate.
// - The metric for measure which will be used is going to be *Delay-Only Distance* #link(<def14-16>)[(Definition 15)]
// - For the edits to the mode, we cost $x$ unit of the budget whenever any bound of a time range of a transition is changed by $x$.
// - The conformance metric used is _fitness_ but here we define it as $- (max_(sigma in L) "dist"_theta (sigma, cal(L(N)))$ which can be easily converted to the normalized distance used in the original definition.
// - Another thing to note is that a Sequential Petri Net is isomorphic to it's Causal Net, hence we will not make a distinction between the two here.
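As a quick check of the arithmetic in this example, the hypothetical `distance` helper and `log` from the sketch in Example 4 can verify both repairs:

```python
# Reusing `distance` and `log` from the sketch in Example 4.
repaired_1 = {"a": (1, 2), "b": (1, 4.5), "c": (0, 2.5), "d": (0, 2)}  # split budget
repaired_2 = {"a": (0.5, 2), "b": (1, 4), "c": (0, 2.5), "d": (0, 2)}  # lower a instead
for candidate in (repaired_1, repaired_2):
    print(max(distance(candidate, tr) for tr in log))  # 1.5 for both repairs
```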
// #example[ // *Example 4:* We start with a simple example and informally go over the procedure // Consider the following Time Petri Net from @ex3 // #figure( // diagram( // spacing: (25pt, 20pt), // node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <p1>), // node((2,0), stroke: 0.5pt, radius: 2mm, name: <p2>), // node((4,0), stroke: 0.5pt, radius: 2mm, name: <p3>), // node((6,0), stroke: 0.5pt, radius: 2mm, name: <p4>), // node((1, 0), $a$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t1>), // node((3, 0), $b$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t2>), // node((5, 0), $c$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3>), // edge(<p1>, <t1>, "-|>"), // edge(<t1>, <p2>, "-|>"), // edge(<p2>, <t2>, "-|>"), // edge(<t2>, <p3>, "-|>"), // edge(<p3>, <t3>, "-|>"), // edge(<t3>, <p4>, "-|>"), // node((1, -0.4), text(size: 7pt, $[1, 2]$)), // node((3, -0.4), text(size: 7pt, $[1, 3]$)), // node((5, -0.4), text(size: 7pt, $[0, 2]$)), // node((0, -0.4), text(size: 7pt, "i")), // node((2, -0.4), text(size: 7pt, "ii")), // node((4, -0.4), text(size: 7pt, "iii")), // node((6, -0.4), text(size: 7pt, "iv")), // )) // We are also given the following log // #math.equation(block:true, numbering:none)[$ // L = mat(delim:"{", // "[" (a, 1), (b, 5), (c, 9)"]," ; // "[" (a, 0), (b, 5), (c, 7)"]" ; // ) // $] // And we are given the budget $beta = 2$. // Our goal is to edit the model by making a change of at most $beta = 2$ to the boundaries of the transitions in order to minimize the distance of the logs from the model. The Procedure will go as follows: // - The transitions only keep track of the delay that is done to trigger them, so it will be easier to look at traces with wait times before the previous transitions rather than the next one, so we construct the following // #math.equation(block:true, numbering:none)[$ // F = mat(delim:"{", // "[" (a, 1), (b, 4), (c, 4)"]," ; // "[" (a, 0), (b, 5), (c, 2)"]" ; // ) // $] // And now it is easier to see that neither of the traces have a corresponding run in $N$, also, the distance of each transition is now easy to compute, for both the transitions it's $3$. So the fitness is $-3$ and we need to reduce the distance of the model from each of the traces to improve the fitness of $N$. // Trace 1 takes transition $b$ and $c$ too late, where as Trace $2$ takes transition $b$ late and takes transition $a$ too early. This means that increasing the upper bound of transition $b$ reduce the distance of both traces, and hence will be optimal, so we spend our budget on $b$. 
// After spending 1 unit of budget though the model changes to // #figure( // diagram( // spacing: (25pt, 20pt), // node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <p1>), // node((2,0), stroke: 0.5pt, radius: 2mm, name: <p2>), // node((4,0), stroke: 0.5pt, radius: 2mm, name: <p3>), // node((6,0), stroke: 0.5pt, radius: 2mm, name: <p4>), // node((1, 0), $a$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t1>), // node((3, 0), $b$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t2>), // node((5, 0), $c$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3>), // edge(<p1>, <t1>, "-|>"), // edge(<t1>, <p2>, "-|>"), // edge(<p2>, <t2>, "-|>"), // edge(<t2>, <p3>, "-|>"), // edge(<p3>, <t3>, "-|>"), // edge(<t3>, <p4>, "-|>"), // node((1, -0.4), text(size: 7pt, $[1, 2]$)), // node((3, -0.4), text(size: 7pt, $[1, colred(4)]$)), // node((3, -0.65), text(size: 7pt, $colgray([1, 3])$)), // node((5, -0.4), text(size: 7pt, $[0, 2]$)), // node((0, -0.4), text(size: 7pt, "i")), // node((2, -0.4), text(size: 7pt, "ii")), // node((4, -0.4), text(size: 7pt, "iii")), // node((6, -0.4), text(size: 7pt, "iv")), // )) // And now trace 1 takes the transition $b$ at the correct time. If we continue to spend out budget entirely on $b$ then we will not reduce the distance of trace 1 from the model anymore and hence will not change the fitness of the model. // So we need to rethink our distribution of the budget. We need to reduce the distance of both the traces from the model, hence we need to spend budget in a way that improves fitness. One way to do it optimally would be to split our leftover budget evenly between transition $b$ and $c$. And finally ending with the following model. // #figure( // diagram( // spacing: (25pt, 20pt), // node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <p1>), // node((2,0), stroke: 0.5pt, radius: 2mm, name: <p2>), // node((4,0), stroke: 0.5pt, radius: 2mm, name: <p3>), // node((6,0), stroke: 0.5pt, radius: 2mm, name: <p4>), // node((1, 0), $a$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t1>), // node((3, 0), $b$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t2>), // node((5, 0), $c$, stroke: 0.5pt, shape: rect, width: 4mm, height: 5mm, name: <t3>), // edge(<p1>, <t1>, "-|>"), // edge(<t1>, <p2>, "-|>"), // edge(<p2>, <t2>, "-|>"), // edge(<t2>, <p3>, "-|>"), // edge(<p3>, <t3>, "-|>"), // edge(<t3>, <p4>, "-|>"), // node((1, -0.4), text(size: 7pt, $[1, 2]$)), // node((3, -0.4), text(size: 7pt, $[1, colred(4.5)]$)), // node((3, -0.65), text(size: 7pt, $colgray([1, 4])$)), // node((5, -0.4), text(size: 7pt, $[0, colred(2.5)]$)), // node((5, -0.65), text(size: 7pt, $colgray([0, 2])$)), // node((0, -0.4), text(size: 7pt, "i")), // node((2, -0.4), text(size: 7pt, "ii")), // node((4, -0.4), text(size: 7pt, "iii")), // node((6, -0.4), text(size: 7pt, "iv")), // )) // ] <ex4> // Can't figure out how to get a page break so 2 blocks :p // #example[ // Now we can stop as we have consumed all our budget and the distance of each trace from the model is $1.5$. // Note that this is not the only optimal solution, reducing the lower bound of transition $a$ instead of increasing the upper bound $b$ in step to also leads to the same improvement in fitness. 
// ] // === Reduction to simpler cases // Note that the only 2 kinds of edits one would want to make to the petri net are: // - increasing the upper bound of a transition // - decreasing a lower bound of a transition // This is because these are precisely the edits that would increase the size of the language of the petri net, and other edits make the language of the petri-net strictly smaller. // We now try to reduce the petri-net in a way that we would only have to deal with 1 type of edit. // We restrict the input set of petri nets to those which in which the static interval function is the constant function $x |-> [0,0]$. // Given a Sequential Time Petri-net $cal(N)$ and a trace $tau$ on it we can reduce it to a Sequential Time Petri-net $cal(N')$ with the above definition in the following: // - If the original set of transitions was $T$ then let $T' = {t_"start" | t in T} union {t_"end" | t in T}$ // - Given places $p_i$ and $p_(i+1)$ and a transition $t_i$ such that $""^circle.filled.small t_i = {p_i}$ and $t_i^circle.filled.small = {p_(i+1)}$ we make states $q_i, q'_i, q_(i+1)$ such that // - $""^circle.filled.small text(t_i)_"start" = {q_i}$ // - $text(t_i)_"start"^circle.filled.small = {q'_i}$ // - $""^circle.filled.small text(t_i)_"end" = {q'_i}$ // - $text(t_i)_"end"^circle.filled.small = {q_(i+1)}$ // This procedure takes in each transition and copies it, one copy for the start boundry of the transition, and one for the end. // Given a flow function $f$ for $cal(N)$, we can define $f'$ for $cal(N')$ as follows: // - If $f = f_1 f_2 ... f_n$ we let $f' = text(f'_1)_"start" text(f'_1)_"end" space text(f'_2)_"start" ... text(f'_n)_"end"$, note that $|f'| = 2 |f|$. // - If $t_i = angle.l "st"_i, "en"_i angle.r$ then // - If $f_i < "st"_i$ we let $text(f'_i)_"start" = "st"_i - f_i$ and $text(f'_i)_"end" = 0$ // - If $f_i > "en"_i$ we let $text(f'_i)_"start" = 0$ and $text(f'_i)_"end" = f_i - "en"_i$ // - otherwise we let $text(f'_i)_"start" = text(f'_i)_"end" = 0$ // #example([ // *Example 5:* Consider the Petri Net and the flow functions $F$ from @ex4.\ // Using the above construction we get the following $N'$ // #figure( // diagram( // spacing: (25pt, 20pt), // // Places // node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <q1>), // node((1,0), stroke: 0.5pt, radius: 2mm, name: <p1>), // node((2,0), stroke: 0.5pt, radius: 2mm, name: <q2>), // node((3,0), stroke: 0.5pt, radius: 2mm, name: <p2>), // node((4,0), stroke: 0.5pt, radius: 2mm, name: <q3>), // node((5,0), stroke: 0.5pt, radius: 2mm, name: <p3>), // node((6,0), stroke: 0.5pt, radius: 2mm, name: <q4>), // node((0, -0.4), text(size: 7pt, $q_1$)), // node((2, -0.4), text(size: 7pt, $q_2$)), // node((4, -0.4), text(size: 7pt, $q_3$)), // node((6, -0.4), text(size: 7pt, $q_4$)), // node((1, -0.4), text(size: 7pt, $q'_1$)), // node((3, -0.4), text(size: 7pt, $q'_2$)), // node((5, -0.4), text(size: 7pt, $q'_3$)), // // Transitions // node((0.5, 0), stroke: 0.5pt, shape: rect, width: 0.5mm, height: 5mm, name: <t1>), // node((1.5, 0), stroke: 0.5pt, shape: rect, width: 0.5mm, height: 5mm, name: <t2>), // node((2.5, 0), stroke: 0.5pt, shape: rect, width: 0.5mm, height: 5mm, name: <t3>), // node((3.5, 0), stroke: 0.5pt, shape: rect, width: 0.5mm, height: 5mm, name: <t4>), // node((4.5, 0), stroke: 0.5pt, shape: rect, width: 0.5mm, height: 5mm, name: <t5>), // node((5.5, 0), stroke: 0.5pt, shape: rect, width: 0.5mm, height: 5mm, name: <t6>), // node((0.5, 0.4), text(size: 5pt, $[0, 0]$)), // node((1.5, 0.4), 
text(size: 5pt, $[0, 0]$)),
// node((2.5, 0.4), text(size: 5pt, $[0, 0]$)),
// node((3.5, 0.4), text(size: 5pt, $[0, 0]$)),
// node((4.5, 0.4), text(size: 5pt, $[0, 0]$)),
// node((5.5, 0.4), text(size: 5pt, $[0, 0]$)),
// // Arcs
// edge(<q1>, <t1>, "|->"),
// edge(<t1>, <p1>, "|->"),
// edge(<p1>, <t2>, "|->"),
// edge(<t2>, <q2>, "|->"),
// edge(<q2>, <t3>, "|->"),
// edge(<t3>, <p2>, "|->"),
// edge(<p2>, <t4>, "|->"),
// edge(<t4>, <q3>, "|->"),
// edge(<q3>, <t5>, "|->"),
// edge(<t5>, <p3>, "|->"),
// edge(<p3>, <t6>, "|->"),
// edge(<t6>, <q4>, "|->"),
// ))
// And we can rewrite the set of flow functions as
// #math.equation(block:true, numbering:none)[$
// F = mat(delim:"{",
// "[" 0, 0, 0, 1, 0, 2"]," ;
// "[" 1, 0, 0, 2, 0, 0"]" ;
// )
// $]
// Note: I am omitting the labelling of the transitions as they are not relevant here.
// ]) <ex5>

// This conversion intuitively changes the petri net so that one can treat different boundaries of transitions as different transitions, taking a transition too early or too late will translate to taking the first transition too late or taking the second transition too late, which matches our goal of having just 1 kind of edit.

// #theorem([
// *Theorem 1:* Given a Petri Net $cal(N)$ and a log $cal(L)$ and it's corresponding net and log in the restricted case $cal(N')$ and $cal(L')$, for each edit of cost $c$ that takes creates $cal(M)$ from $cal(N)$, there is an edit of cost $c$ that creates $cal(M')$ from $cal(N')$ such that
// - $cal(M')$ is the restricited version of $cal(M)$ which can also be constructed using the methods defined above
// - For any net $N$ and log $L$, it's restricted version $N'$ with $L'$ has the same fitness as $N$
// Hence solving model repair for this restricited class, solves it for general sequential petri nets too.
// ]) <theorem1>

// Note: This can be made more efficient by only considering transitions for boundaries that matter. i.e, for an upper bound if a transition is taken too late in a trace, or for a lower bound if a transition is taken too early in a trace. This is especially easy to notice in petri-nets produced by editing the above restricited nets, apart from the first reduction, all other reductions either decrease the number of states or keep it the same.

// This reduction will be assumed for @unfit, @gradient and @solving.

=== The $"unfit"$ function <unfit>

With Extended Free Choice Petri nets, we get a conflict-free subset of Petri nets: we can freely alter the delay we wait before taking a transition without having to worry about the fireability of other transitions that are not causally linked to the current one. This makes it easy to compute the alignment of a given trace. The edits we make to a given Petri net, and how they affect the _fitness_ of the model, are captured by the $"unfit"$ function.

For each transition, the bounds of its static interval can be edited in 2 ways: increasing the upper bound and reducing the lower bound. So, given a Petri net with $n$ transitions, every edit made to the Petri net can be represented as a vector in $RR^(2n)$. We represent the initial configuration of the net $cal(N)$ as $arrow(0)$, and a vector $arrow(v)$ represents the Petri net $cal(N')$ derived by editing $cal(N)$ as follows: if the $k^"th"$ transition of $cal(N)$ is $[a, b]$, then the $k^"th"$ transition of $cal(N')$ is $[a - arrow(v)_(2k - 1), b + arrow(v)_(2k)]$. Here $arrow(v)_m$ is the $m^"th"$ component of $arrow(v)$, where indexing starts at $1$.
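As a concrete illustration of this encoding, here is a small sketch (illustrative only; note that the code indexes the vector from $0$ while the text indexes from $1$):

```python
def apply_edit(intervals, v):
    """intervals: list of (lo, hi) static intervals; v: non-negative edit
    vector of length 2n. Component 2k-1 (1-indexed) lowers the k-th lower
    bound and component 2k raises the k-th upper bound."""
    return [(lo - v[2 * k], hi + v[2 * k + 1])
            for k, (lo, hi) in enumerate(intervals)]

print(apply_edit([(1, 3), (0, 2)], [0.5, 1.0, 0.0, 0.25]))
# [(0.5, 4.0), (0.0, 2.25)]
```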
More specifically, the $"unfit"$ function has the type $(RR^+)^(2n) -> RR$: it takes in a vector in $(RR^+)^(2n)$, which represents an edited Petri net, and returns the maximum distance of any log trace from the language of the model.

The following helper function $d' : RR times RR -> RR$ will be useful: $d'(a, b) = max(0, b-a)$.

First we define the $"unfit"$ function for the case where there is just one trace in the log. Let the trace be $tau$ and its flow function be $f$. Here the $"unfit"$ function is the same as the distance function. For each transition $t_i$ of the Petri net, we define $d_i (arrow(a), arrow(b)) = d'(arrow(a)(i), arrow(b)(i))$, where $arrow(a)(i)$ is the $i^"th"$ component of $arrow(a)$. Now that we have defined it for each component, we let $D (arrow(a), arrow(b))= sum_(i = 1)^n d_i( arrow(a), arrow(b))$.

#theorem([
*Theorem 2:* Given a vector $arrow(a)$ representing an edit on the Petri net $cal(N)$ producing $cal(N')$, and a flow function $f$ in $cal(F')$, $D(arrow(a), f)$ is precisely $"dist"_theta$ between the edited net and $f$.
]) <theorem2>

#definition([
*Definition 18:* Given a net $cal(N)$ whose static interval function is the constant $[0,0]$ for all transitions, and a log $cal(L)$, the $"unfit"$ _function_ can be defined as follows:
$
"unfit"_cal((N, L))(a) = max_(f in cal(F')) D(a, f)
$
]) <def17>

#theorem([
*Corollary 3:* $"unfit"_cal((N, L))(a)$ is the negation of the fitness of $cal(N)$ with respect to $cal(L)$.
]) <theorem3>

#example([
*Example 5:* Consider the following net $cal(N)$ with 2 transitions
#figure(
diagram(
	spacing: (25pt, 20pt),
	// Places
	node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <q1>),
	node((1,0), stroke: 0.5pt, radius: 2mm, name: <q2>),
	node((2,0), stroke: 0.5pt, radius: 2mm, name: <q3>),
	node((0, -0.4), text(size: 7pt, $q_1$)),
	node((1, -0.4), text(size: 7pt, $q_2$)),
	node((2, -0.4), text(size: 7pt, $q_3$)),
	// Transitions
	node((0.5, 0), stroke: 0.5pt, shape: rect, width: 0.5mm, height: 5mm, name: <t1>),
	node((1.5, 0), stroke: 0.5pt, shape: rect, width: 0.5mm, height: 5mm, name: <t2>),
	node((0.5, 0.4), text(size: 5pt, $[0, 0]$)),
	node((1.5, 0.4), text(size: 5pt, $[0, 0]$)),
	// Arcs
	edge(<q1>, <t1>, "|->"),
	edge(<t1>, <q2>, "|->"),
	edge(<q2>, <t2>, "|->"),
	edge(<t2>, <q3>, "|->"),
))
and the following set of flow functions for it:
#math.equation(block:true, numbering:none)[$
F = mat(delim:"{",
	f_1, =, "[" 2, 0"]," ;
	f_2, =, "[" 0, 2"]," ;
	f_3, =, "[" 1.5, 1.5"]," ;
)
$]
The following are the graphs of the $"unfit"$ functions:
#grid(
	columns: (auto, auto, auto),
	figure(
		image("Images/unfit_ex_f1.png", width: 80%),
		caption: [
			$"unfit"$ function for $f_1$
		],
		kind: "Image",
		supplement: [Image]
	),
	figure(
		image("Images/unfit_ex_f2.png", width: 80%),
		caption: [
			$"unfit"$ function for $f_2$
		],
		kind: "Image",
		supplement: [Image]
	),
	figure(
		image("Images/unfit_ex_f3.png", width: 80%),
		caption: [
			$"unfit"$ function for $f_3$
		],
		kind: "Image",
		supplement: [Image]
	),
)
#figure(
	image("Images/unfit_ex_final.png", width: 30%),
	caption: [
		$"unfit"$ function for the entire problem
	],
	kind: "Image",
	supplement: [Image]
)
]) <ex5>

// #line()

// Given an a petri net $cal(N)$ with $n$ transitions, any edit, must increase the upper bound of a transition by some amount, so we can represent an edit by an $n$ dimesional vector, precisely the amount by which each upperbound of a transition is increased (or lowerbound of a transition is decreased), formally, for any edit that takes a petri net $cal(N)$ to a petri net $cal(N')$, one can represent it as the
vector $v$ such that $v(i) = cal(N')(i)_"end" - cal(N)(i)_"end"$. Where $cal(N)(i)_"end"$ is the upper bound of the static interval of the $i^"th"$ transition of net $cal(N)$. (We define symmetrically for lowerbounds) // Now the space $(RR^+)^n$ can be mapped to the space of the petri nets that can be creating by editing a given starting petri net $cal(N)$. // This lets us define the $"unfit" : (RR^+)^n -> RR$ function. The input of the function is a vector, which represents an edit to the original petri net $cal(N)$ and the output of the function is the negation of the fitness of the net obtained after the edit. // The following helper function $d' : RR times RR -> RR$ will be useful: $d'(a, b) = max(0, b-a)$ // First we define the $"unfit"$ function for the case where there is just 1 trace in the log. Let the trace be $tau$ and it's flow function be $f$. Here the $"unfit"$ function is the same as the distance function. // For each transition $t_i$ of the petri net, we define $d_i (arrow(a), arrow(b)) = d'(arrow(a)(i), arrow(b)(i))$. Where $arrow(a)(i)$ is the $i^"th"$ component of $arrow(a)$. // Now that we have defined it for each component we let $D (arrow(a), arrow(b))= sum_(i = 1)^n d_i( arrow(a), arrow(b))$ // #theorem([ // *Theorem 2:* Given a vector $arrow(a)$ representing an edit on the petri net $cal(N)$ producing $cal(N')$ and a flow function $f$ in $cal(F')$, $D(a, f)$ is precisely $"dist"_theta$ between the edited net and $f$. // ]) <theorem2> // #definition([ // *Definition 18:* Given a net $cal(N)$ with constant $[0,0]$ static interval functions for all transitions and a log $cal(L)$, the $"unfit"$ _function_ can be defined as follows. // $ // "unfit"_cal((N, L))(a) = max_(f in cal(F')) D(a, f) // $ // ]) <def17> // #theorem([ // *Corollary 3:* $"unfit"_cal((N, L))(a)$ is negation of the fitness of $cal(N)$ with respect to $cal(L)$. 
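To make Definition 18 concrete, the following small sketch (illustrative only) evaluates the $"unfit"$ function of Example 5 at a few edit vectors:

```python
flows = [[2.0, 0.0], [0.0, 2.0], [1.5, 1.5]]  # f_1, f_2, f_3 from Example 5

def unfit(a):
    """D(a, f) summed over the transitions, maximized over the flows."""
    return max(sum(max(0.0, f_i - a_i) for f_i, a_i in zip(f, a))
               for f in flows)

print(unfit([0.0, 0.0]))  # 3.0: f_3 misses both transitions by 1.5
print(unfit([1.5, 1.5]))  # 0.5: f_3 now fits, while f_1 and f_2 each miss by 0.5
print(unfit([2.0, 2.0]))  # 0.0: every flow now has a corresponding run
```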
// ]) <theorem3>

// #example([
// *Example 5:* Consider the following net $cal(N)$ with 2 transitions
// #figure(
// diagram(
// spacing: (25pt, 20pt),
// // Places
// node((0,0), $circle.filled.small$, stroke: 0.5pt, radius: 2mm, name: <q1>),
// node((1,0), stroke: 0.5pt, radius: 2mm, name: <q2>),
// node((2,0), stroke: 0.5pt, radius: 2mm, name: <q3>),
// node((0, -0.4), text(size: 7pt, $q_1$)),
// node((1, -0.4), text(size: 7pt, $q_2$)),
// node((2, -0.4), text(size: 7pt, $q_3$)),
// // Transitions
// node((0.5, 0), stroke: 0.5pt, shape: rect, width: 0.5mm, height: 5mm, name: <t1>),
// node((1.5, 0), stroke: 0.5pt, shape: rect, width: 0.5mm, height: 5mm, name: <t2>),
// node((0.5, 0.4), text(size: 5pt, $[0, 0]$)),
// node((1.5, 0.4), text(size: 5pt, $[0, 0]$)),
// // Arcs
// edge(<q1>, <t1>, "|->"),
// edge(<t1>, <q2>, "|->"),
// edge(<q2>, <t2>, "|->"),
// edge(<t2>, <q3>, "|->"),
// ))
// And the following set of flow functions for it
// #math.equation(block:true, numbering:none)[$
// F = mat(delim:"{",
// f_1, =, "[" 2, 0"]," ;
// f_2, =, "[" 0, 2"]," ;
// f_3, =, "[" 1.5, 1.5"]," ;
// )
// $]
// And the following are the graphs of the $"unfit"$ functions
// #grid(
// columns: (auto, auto, auto),
// figure(
// image("Images/unfit_ex_f1.png", width: 80%),
// caption: [
// $"unfit"$ function for $f_1$
// ],
// kind: "Image",
// supplement: [Image]
// ),
// figure(
// image("Images/unfit_ex_f2.png", width: 80%),
// caption: [
// $"unfit"$ function for $f_2$
// ],
// kind: "Image",
// supplement: [Image]
// ),
// figure(
// image("Images/unfit_ex_f3.png", width: 80%),
// caption: [
// $"unfit"$ function for $f_3$
// ],
// kind: "Image",
// supplement: [Image]
// ),
// )
// #figure(
// image("Images/unfit_ex_final.png", width: 30%),
// caption: [
// $"unfit"$ function for the entire problem
// ],
// kind: "Image",
// supplement: [Image]
// )
// ]) <ex5>

=== Gradient Descent <gradient>

#theorem([
*Theorem 4:* The $"unfit"$ function has a number of nice properties:
- The function $d_i$ is continuous and piecewise linear.
- It is also a convex function.
- $"unfit"$ is just multiple $d_i$ functions combined using $max$ and summation, which means it is also continuous, piecewise linear, and convex.
- The domain of the function in general is $(RR^+)^n$, but it can be restricted to all vectors the sum of whose components ($p_1$ norm) is at most $beta$, to give the problem a budget. In either scenario the input space is a convex set.
])

These properties make the function very well suited to gradient descent, so we choose that as our strategy for solving the problem; it also precisely matches the intuitive, direct strategy of repeatedly finding the locally optimal way to distribute the budget.

Gradient Descent is a greedy mathematical optimization technique that starts at a point in the input space and moves in the opposite direction of the gradient (the direction in which the function decreases at the highest rate) until a local minimum is reached.

For our case, picking Gradient Descent is a good option because:
- The function is piecewise linear, which means that in each part the gradient is constant and can be computed easily using linear programming.
- The function is convex with a convex set as its input space, which means that there is only one set of global minima, which is itself convex, and no other critical points.
- The input space can be bounded: if the fitness of the model is $-k$, then we can bound the input space to all vectors whose $p_1$ norm is at most $k * |L|$. This is also a closed set, hence compact, which means the points that attain the global minimum lie in the set.
Therefore, we can give the following algorithm for solving the model repair problem:
- We start at the point $bold(0)$.
- At every step we find the direction of steepest descent and keep moving in that direction until one of the following happens:
	- We consume the entire budget.
	- A new trace becomes a trace with maximum distance from the net.
	- Some trace with maximum distance from the net has a transition which is now taken at a time delay accepted by the edited Petri net.
- Whenever we reach such a point, we recompute the gradient and keep repeating this and the previous step.
- We stop when the budget runs out or when the model becomes fit.

=== Computing the Solution <solving>

We have defined our unfit function as the maximum of the distances of the log traces from the model. Let $cal(F)_"max"$ be the set of flow functions with maximum distance from $cal(N)$. We now use linear programming both to compute the direction of steepest descent and to compute the amount of budget to be spent along it before a recomputation is required.

We will be using the following list of variables:
- For every $t$ in the set of transitions of the Petri net (denoted by $T$), we have a variable $b_t$ holding the budget assigned to $t$.
- For every $f$ in $cal(F)$ we have $"imp"_f$, denoting the reduction in the distance of $f$ from $cal(N)$ caused by the edit.
- $"improvement"$, holding the total change in the fitness of the model; our goal will be to maximize this.
- $"spend"$, holding the total amount of budget spent.

The following constants will be helpful in writing the equations:
- We let $arrow(a) = bold(0)$.
- $beta$, the total budget available.
- $"un-fitness" = "unfit"_cal((N, L))(arrow(a))$.
- For each $f in cal(F)$ we have $d_f = D(arrow(a), f)$, that is, the distance of $f$ from $cal(N)$.
- For each $f in cal(F)$ and each $t in T$ we let $d_(f, t) = f(t)$, the delay of $f$ at $t$.
- For each $f in cal(F)$ and each $t in T$, where $t$ is the $i^"th"$ transition, we have $"affects"_(t, f)$, which is
	- $0$ if $d_i (arrow(a), f)=0$
	- $1$ otherwise

The goal of the linear program is
$
"Maximize"("improvement")
$
under the constraints:
- The improvement in a flow function is the total budget assigned to the transitions which affect it, so for each $f in cal(F)$ we have
$
"imp"_f = sum_(t in T) b_t times "affects"_(t, f)
$
- The total improvement is the least improvement over all of the flow functions in $cal(F)_"max"$, so for each $f in cal(F)_"max"$
$
"improvement" <= "imp"_f
$
- We restrict the amount of budget we spend to exactly 1 unit; this does not give the final edit, but the direction in which we make it:
$
1 = sum_(t in T) b_t
$

The variables $b_t$ give the direction of steepest descent, but we also compute the following quantities to determine the largest "step size" after which a recomputation is required.
- We need to recompute when a new flow function needs to be added to $cal(F)_"max"$, so for all $f in cal(F)$ we compute the amount of budget that can be spent as
$
lambda_f = (d_f - "un-fitness")/("imp"_f - "improvement")
$
- We need to recompute each time the $"affects"$ constants become outdated, so for all $i in [1 ... n]$, letting $t_i$ be the $i^"th"$ transition, and for all $f in cal(F)$, we have
$
lambda_(f, i) = d_(f, t_i)/b_(t_i)
$
A sketch of this linear program is given below.
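The following is a minimal sketch of the steepest-descent program above (illustrative only; the use of `scipy.optimize.linprog` and the matrix encoding of the $"affects"$ constants are assumptions of the sketch, and the step-size computation is omitted):

```python
import numpy as np
from scipy.optimize import linprog

def steepest_direction(affects, max_rows):
    """affects: (num_flows x num_transitions) 0/1 matrix of the
    affects_(t, f) constants; max_rows: indices of the flows in F_max.
    Returns the budget split b_t and the resulting improvement."""
    m, n = affects.shape
    c = np.zeros(n + 1)
    c[-1] = -1.0                                  # maximize improvement
    # improvement <= imp_f for every maximal flow f
    A_ub = np.hstack([-affects[max_rows], np.ones((len(max_rows), 1))])
    A_eq = np.zeros((1, n + 1))
    A_eq[0, :n] = 1.0                             # spend exactly one unit
    res = linprog(c, A_ub=A_ub, b_ub=np.zeros(len(max_rows)),
                  A_eq=A_eq, b_eq=[1.0], bounds=[(0, None)] * (n + 1))
    return res.x[:n], res.x[-1]

# Two maximal flows; only the second transition affects both of them:
b, imp = steepest_direction(np.array([[1.0, 1.0], [0.0, 1.0]]), [0, 1])
print(b, imp)  # the whole unit goes to the second transition: [0, 1], 1.0
```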
We now take $lambda$ to be the minimum of all positive $lambda_f$, $lambda_(f, i)$, and the budget $beta$; that is the amount of budget that can be spent safely.

Once we have the values of $b_(t_i)$ after solving the linear program, we set $arrow(a)(i) := arrow(a)(i) + lambda b_(t_i)$, where $t_i$ is the $i^"th"$ transition. We also set $beta := beta - lambda$. After updating these two, we recompute all the other constants and solve the linear program again, until we consume the entire budget or the $"un-fitness"$ becomes $0$.

=== Proof of correctness

The proof of correctness involves showing that we always reach the minimum of the $"unfit"$ function.

#theorem[*Lemma 1:* Let $y$ be a point where the $"unfit"$ function attains its minimum over the domain. Then for any point $x$ where the function does not attain its minimum, if the algorithm is at $x$, it has not terminated.]

From the construction of the algorithm, we get that the algorithm does not terminate iff there exist a $v$ and an $epsilon > 0$ such that $forall 0 < epsilon' <= epsilon, "unfit"(x + epsilon' v)< "unfit"(x)$. Letting $v = y - x$ and picking $epsilon = 1$, the above condition is met by the convexity of $"unfit"$. $square$

Now we just need to prove that the algorithm terminates:
- Each iteration of the algorithm involves a linear optimization problem, which takes a finite amount of time, and an edit, which also takes a finite amount of time.
- We divide the input space into finitely many regions as follows:
	- First, for each boundary and each trace, we have 2 regions: one where the trace respects the static interval of the boundary, and one where it doesn't. So for $n$ transitions we get at most $2^n$ regions per trace, and we consider the common refinement of the partitions created by this process for each trace. Whenever we change parts during gradient descent, we need to start another iteration of steps.
	- Within a part, the $"unfit"$ function for each trace behaves like a linear map, so for each pair of traces $tau_1$ and $tau_2$, each part can be further divided into 2 parts: one where the $"unfit"$ function for $tau_1$ is greater, and one where it isn't. This gives us that there are only finitely many parts.

+ Each iteration takes us to a different part, by construction.
+ None of the parts as defined in the first bullet are visited twice: each region can be given a vector $v_k in {0, 1}^(2n)$ whose $i^"th"$ component is $1$ if the $i^"th"$ transition affects the $k^"th"$ trace, and we switch regions only by converting a $1$ to a $0$ in some vector, which can only happen a finite number of times.
+ Within each region, the function is a $max$ of linear functions, and hence gradient descent takes finitely many steps.

So the algorithm must terminate. This proof works for Extended Free Choice Petri Nets as they are conflict free: changing the time at which a transition is taken does not affect the fireability of any other transition.

// = Archive

// === Finding the Distance between the Model and the Log Traces

// During the execution of the model repair algorith, we keep updating the model, which means that the set of furthest log traces might change, hence we need to keep track of all the traces.

// We define the distance between a trace and a model as the minimum distance between the trace and any word of the model.
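As a quick numerical spot check of the convexity that Lemma 1 relies on (illustrative only; this reuses the hypothetical `unfit` helper defined for Example 5 in an earlier sketch, and is of course not a substitute for the proof):

```python
import random

random.seed(0)
for _ in range(1000):
    x = [random.uniform(0, 3) for _ in range(2)]
    y = [random.uniform(0, 3) for _ in range(2)]
    mid = [(xi + yi) / 2 for xi, yi in zip(x, y)]
    # midpoint convexity: unfit((x + y) / 2) <= (unfit(x) + unfit(y)) / 2
    assert unfit(mid) <= (unfit(x) + unfit(y)) / 2 + 1e-9
print("midpoint convexity holds on all sampled pairs")
```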
// #definition([ // *Definition 19:* The _flow function_ (or simply _flow_) of a trace is $f : (Sigma, RR^+)^*$ which keeps track of the delay between successive events and is defined for $tau =tau_1 tau_2 ... tau_n$ as $f = f_1 f_2 ... f_n$ where // #math.equation(block:true, numbering: none)[ // $ // f_i := cases( // tau_1 &i=1, // tau_i - tau_(i-1) quad quad quad&i in [2 ... n] // ) // $ // ] // ]) <def18> // Let the sequence of transitions in $cal(N)$ be $T = {t_1, t_2, t_3 ... t_n}$ where each $"SI"(t_i) = angle.l s_i, e_i angle.r$. // Given a trace $tau = tau_1 tau_2 ... tau_n$ we can say it's distance from $cal(N)$ can be given by the following. // Given a trace, we first find its flow and then we can use that to easily compute it's distance from the model in the following way. // #algo( // title: [ // note that title and parameters // #set text(size: 10pt) // can be content // *Algorithm 1:* Dist // ], // parameters: ([$cal(N)$ : net],[$f$ : flow]), // comment-prefix: [#sym.triangle.stroked.r ], // comment-styles: (fill: rgb(50%, 50%, 50%)), // indent-size: 15pt, // indent-guides: 1pt + gray, // row-gutter: 5pt, // column-gutter: 5pt, // inset: 5pt, // stroke: 2pt + black, // fill: none, // )[ // $"n" := f."length"()$\ // $"dist" := 0$\ // for $i$ in $1 ... "n"$: #i\ // if $f_i < s_i$ #i #comment[Transition taken too early]\ // $"dist" := "dist" + (s_i - f_i)$ #d\ // else if $f_i > e_i$ #i #comment[Transition taken too late]\ // $"dist" := "dist" + (f_i - e_i)$ #d\ // else #i #comment[Transition taken on time]\ // $"dist" := "dist" + 0$ #d #d\ // return $"dist"$ // ] <alg1> // This can be done for each trace in $L$.\ // Now that we have a the set of logs we find the subset of the that are furthest away from the model which can simply be given by // $ // L_("max") = { sigma in L | forall sigma' in L, "Dist"(sigma, cal(N)) >= "Dist"(sigma', cal(N)) } // $ // === Finding the Optimal Changes to Transitions // We want to mimize out budget for a giving change in the fitness of the model, there are a few thigns that we need to keep in mind for that. // - The fitness is only affected by log traces that are furthest away from the model. // - If we want to make a change to transition, which affects some of furthest traces, but not all, it will not change the fitness, as the unaffected trace is still equally far away. So we would like to divide the budget to deal with multiple log traces at once. // Note that we can say that improvement in fitness is $min$ of improvments in each trace. And improvement in a trace $tau$ is just the sum of budgets assigned to the transitions that affect the distance of $tau$. // - For dealing with all boundaries at once, we define $B = {s_i | angle.l s_i, e_i angle.r in T} union {e_i | angle.l s_i, e_i angle.r in T}$ // - The above statement about boundaries affecting traces can be formalized as // $ // "is_affected_by"(tau, b) = cases( // top quad quad quad &b = e_i "and" e_i < tau_i, // top quad quad quad &b = s_i "and" s_i > tau_i, // bot &"otherwise" // ) // $ // We can rephrase our problem of finding an optimal distribution as a linear program, we define use the following definitions for it: // - for each trace $tau_i in L'$ we have a variable $"tr"_i$ which represents how close the trace gets after making the edit. // - for each $b in B$ we have the variable $"ch"_b$ which represent the portion of the budget assigned to that bound. Then we get the following equation for each $tr_i$. 
// $ // tr_i = sum_(b in B\ "is_affected_by"(tau_i, b) = top) "ch"_b // $ // And we can measure the overall improved by the varible $"improvement"$ which can be given the constraint for each $tau_i in L'$ // $ // "improvement" <= tr_i // $ // Now for are linear program we just need condition // $ // max("improvement") // $ // There are 3 extra constraints that we need to put that the algorithm does not ask us to spend an infinite amount of budget, these constraints correspond to the conditions when we need to stop spending the budget. // - We cannot spend more than the budget // $ // sum_(b in B) "ch"_b <= beta // $ // - We not to re-evaluate the updates each time an edit to a transition makes makes a trace be valid at some point. // - To do that, forall all $tau in L$, we define it's distance $d_(tau, b)$ from a bound $b$ as // - $0$ if it not affected by it. // - Distance between $f_j$ and $t_j$ where $f$ is the flow function of $tau$ and $t_j = angle.l dash, b angle.r "or" angle.l b, dash, angle.r$ // - And $forall tau in L$ and $forall b in B$, if $"is_affected_by"(tau, b)$ we add the constraint // $ // d_(tau, b) >= "ch"_b // $ // - We need to consider new transitions when they join the set of furthest transitions // - For every $tau_i in L$ (note: previously we were only dealing with $L'$) we define // $ // "tr"_i = sum_(b in B\ "is_affected_by"(tau_i, b)=top) "ch"_b \ // "and" \ // D - "improvement" >= "Dist"(tau_i, cal(N)) - "tr"_i // $ // where $D$ is the maximum distance of a log trace from $cal(N)$. // Finding a solution to the above linear program gives an edit for the petri net, and the change in the fitness which is $"improvement"$. // === Editing the Petri Net // Now we go over all $"ch"_b$ // - If $b = e_i$, then we set $e_i <- e_i + "ch"_b$ // - If $b = s_i$, then we set $s_i <- s_i - "ch"_b$ // - We also set $D <- D - "improvement"$ // - We update the budget $beta <- beta - sum_(b in B) "ch"_b$ // And we keep repeating this process until our budget goes down to zero.
https://github.com/touying-typ/touying-exporter
https://raw.githubusercontent.com/touying-typ/touying-exporter/main/README.md
markdown
MIT License
# Touying Exporter Export presentation slides in various formats for Touying. ## HTML Export We generate SVG image files and package them with impress.js into an HTML file. This way, you can open and present it using a browser, and it supports GIF animations and speaker notes. ![image](https://github.com/touying-typ/touying-exporter/assets/34951714/207ddffc-87c8-4976-9bf4-4c6c5e2573ea) ![image](https://github.com/touying-typ/touying-exporter/assets/34951714/eac4976b-7d5d-40b6-8827-88c9a024b89a) [Touying template](https://github.com/touying-typ/touying-template) for online presentation. [Online](https://touying-typ.github.io/touying-template/) ## PPTX Export We generate PNG image files and package them into a PPTX file. This way, you can open and present it using PowerPoint, and it supports speaker notes. ![image](https://github.com/touying-typ/touying-exporter/assets/34951714/3d547c74-fb4b-4c31-81e5-5138a5d727c9) ## Install ```sh pip install touying ``` ## CLI ```text usage: touying compile [-h] [--output OUTPUT] [--root ROOT] [--font-paths [FONT_PATHS ...]] [--start-page START_PAGE] [--count COUNT] [--ppi PPI] [--silent SILENT] [--format {html,pptx,pdf,pdfpc}] input positional arguments: input Input file options: -h, --help show this help message and exit --output OUTPUT Output file --root ROOT Root directory for typst file --font-paths [FONT_PATHS ...] Paths to custom fonts --start-page START_PAGE Page to start from --count COUNT Number of pages to convert --ppi PPI Pixels per inch for PPTX format --silent SILENT Run silently --format {html,pptx,pdf,pdfpc} Output format ``` For example: ```sh touying compile example.typ ``` You will get a `example.html` file. Open it with your browser and start your presentation :-) ## Use it as a python package ```python import touying touying.to_html("example.typ") ``` ## Thanks - [impress.js](https://github.com/impress/impress.js) - [typst-py](https://github.com/messense/typst-py) - [python-pptx](https://github.com/scanny/python-pptx)
https://github.com/typst/packages
https://raw.githubusercontent.com/typst/packages/main/packages/preview/valkyrie/0.1.0/src/context.typ
typst
Apache License 2.0
#let context-proto = ( strict: false, soft-error: false, coerce: false, // TO DO ) /// Appends setting to context. Used for setting the context of child parses. /// /// - ctx (context, none): Current context (if present, or undefined if not), to which contextual flags passed in variadic arguments are appended. /// - ..args (arguments): Variadic contextual flags to set. While it accepts positional arguments, only named contextual flags are used throughout the codebase. #let context(ctx: (:), ..args) = { return (:..context-proto, ..ctx, ..args.named()) }
https://github.com/chamik/gympl-skripta
https://raw.githubusercontent.com/chamik/gympl-skripta/main/helper.typ
typst
Creative Commons Attribution Share Alike 4.0 International
#let hrule() = [#line(length: 100%, stroke: (paint: luma(60%)))] #let poezie(doc) = [ #set align(center) #set footnote.entry( separator: align(left)[#line(length: 30%, stroke: 0.5pt)] ) #show footnote.entry: it => [ #set align(left) #it ] #doc ] #let dilo(nazev, tag, autor, preklad, obdobi, zeme, vydani, druh, zanr) = [ #counter(footnote).update(0) #set par(justify: false) #block( fill: luma(230), inset: 6pt, radius: 4pt, width: 100%, [#columns(3, gutter: 7pt)[ #heading(level: 3, nazev) #label(tag) Autor: #strong(autor) \ #if preklad != "" [ Překlad: #emph(preklad) ] #colbreak() Období: #strong(obdobi) \ Země: #strong(zeme) #colbreak() Rok vydání: #strong(vydani) \ L. druh: #strong(druh) \ L. žánr: #strong(zanr) ]]) ] #let autor(jmeno, narozen, zemrel, povolani, vystudoval, smer, foto) = [ #counter(footnote).update(0) #set par(justify: false) #block( fill: luma(230), inset: 8pt, radius: 4pt, width: 100%, [#columns(3, gutter: 5pt)[ #strong(jmeno) \ #sym.star.op #narozen #h(0.6em) #if zemrel != "" [#sym.dagger #zemrel] Směr: #strong(smer) #colbreak() #strong(povolani) studoval *#vystudoval* #colbreak() #align(center, image(foto, height: 3.5cm)) ]]) ] #let replika(jmeno, repl) = ( [#align(right)[#smallcaps(jmeno)]], [#h(1em) #repl #v(.4em)] )