python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
/* sockex2_kern.c: socket-filter BPF program.
 *
 * Dissects each packet's flow (handling 802.1Q/802.1AD VLAN tags and
 * GRE/IPIP/IPv6-in-IPv4 tunnels) and accumulates per-destination-address
 * packet and byte counters in 'hash_map', which the companion user-space
 * program (sockex2_user.c) polls and prints.
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/in.h>
#include <uapi/linux/if.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/ipv6.h>
#include <uapi/linux/if_tunnel.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"

/* IPv4 frag_off bits: more-fragments flag and fragment-offset mask. */
#define IP_MF		0x2000
#define IP_OFFSET	0x1FFF

struct vlan_hdr {
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

/* Flattened flow identity filled in by flow_dissector(). */
struct flow_key_record {
	__be32 src;
	__be32 dst;
	union {
		__be32 ports;		/* both L4 ports as one word */
		__be16 port16[2];	/* source / destination port */
	};
	__u16 thoff;			/* transport header offset */
	__u8 ip_proto;
};

/* Offset of the port pair within the L4 header for the given protocol.
 * NOTE(review): the default branch returns 0 (not an error), so the
 * "poff >= 0" test in flow_dissector() below is always true.
 */
static inline int proto_ports_offset(__u64 proto)
{
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		return 0;
	case IPPROTO_AH:
		return 4;	/* ports follow the 4-byte AH preamble */
	default:
		return 0;
	}
}

/* Non-zero when the IPv4 header at nhoff belongs to a fragment. */
static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
{
	return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
		& (IP_MF | IP_OFFSET);
}

/* Fold a 128-bit IPv6 address into 32 bits by XORing its four words. */
static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
{
	__u64 w0 = load_word(ctx, off);
	__u64 w1 = load_word(ctx, off + 4);
	__u64 w2 = load_word(ctx, off + 8);
	__u64 w3 = load_word(ctx, off + 12);

	return (__u32)(w0 ^ w1 ^ w2 ^ w3);
}

/* Parse the IPv4 header at nhoff: record addresses (except for GRE,
 * whose inner header will be parsed instead), report the L4 protocol
 * (0 for fragments), and return the offset just past the IP header.
 */
static inline __u64 parse_ip(struct __sk_buff *skb, __u64 nhoff,
			     __u64 *ip_proto, struct flow_key_record *flow)
{
	__u64 verlen;

	if (unlikely(ip_is_fragment(skb, nhoff)))
		*ip_proto = 0;	/* fragments: L4 header may be absent */
	else
		*ip_proto = load_byte(skb, nhoff +
				      offsetof(struct iphdr, protocol));

	if (*ip_proto != IPPROTO_GRE) {
		flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
		flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
	}

	/* version/ihl byte; 0x45 is the common no-options header (20B) */
	verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
	if (likely(verlen == 0x45))
		nhoff += 20;
	else
		nhoff += (verlen & 0xF) << 2;	/* ihl is in 32-bit words */

	return nhoff;
}

/* Parse the IPv6 header at nhoff: record hashed addresses and the next
 * header value, and return the offset just past the fixed header
 * (extension headers are not walked).
 */
static inline __u64 parse_ipv6(struct __sk_buff *skb, __u64 nhoff,
			       __u64 *ip_proto, struct flow_key_record *flow)
{
	*ip_proto = load_byte(skb, nhoff + offsetof(struct ipv6hdr, nexthdr));
	flow->src = ipv6_addr_hash(skb, nhoff + offsetof(struct ipv6hdr, saddr));
	flow->dst = ipv6_addr_hash(skb, nhoff + offsetof(struct ipv6hdr, daddr));
	nhoff += sizeof(struct ipv6hdr);

	return nhoff;
}

/* Walk the packet from the Ethernet header, unwrapping at most one
 * 802.1AD + one 802.1Q tag and one level of GRE/IPIP/IPv6 tunneling,
 * and fill in *flow. Returns false for non-IP traffic.
 */
static inline bool flow_dissector(struct __sk_buff *skb,
				  struct flow_key_record *flow)
{
	__u64 nhoff = ETH_HLEN;
	__u64 ip_proto;
	__u64 proto = load_half(skb, 12);	/* ethertype field */
	int poff;

	if (proto == ETH_P_8021AD) {
		proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
							h_vlan_encapsulated_proto));
		nhoff += sizeof(struct vlan_hdr);
	}

	if (proto == ETH_P_8021Q) {
		proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
							h_vlan_encapsulated_proto));
		nhoff += sizeof(struct vlan_hdr);
	}

	if (likely(proto == ETH_P_IP))
		nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
	else if (proto == ETH_P_IPV6)
		nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
	else
		return false;

	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		};

		__u64 gre_flags = load_half(skb, nhoff +
					    offsetof(struct gre_hdr, flags));
		__u64 gre_proto = load_half(skb, nhoff +
					    offsetof(struct gre_hdr, proto));

		/* only version-0 GRE without routing info is handled */
		if (gre_flags & (GRE_VERSION|GRE_ROUTING))
			break;

		proto = gre_proto;
		nhoff += 4;	/* base GRE header */
		if (gre_flags & GRE_CSUM)
			nhoff += 4;
		if (gre_flags & GRE_KEY)
			nhoff += 4;
		if (gre_flags & GRE_SEQ)
			nhoff += 4;

		if (proto == ETH_P_8021Q) {
			proto = load_half(skb,
					  nhoff + offsetof(struct vlan_hdr,
							   h_vlan_encapsulated_proto));
			nhoff += sizeof(struct vlan_hdr);
		}

		/* re-dissect the inner packet; its addresses win */
		if (proto == ETH_P_IP)
			nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
		else if (proto == ETH_P_IPV6)
			nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
		else
			return false;
		break;
	}
	case IPPROTO_IPIP:
		nhoff = parse_ip(skb, nhoff, &ip_proto, flow);
		break;
	case IPPROTO_IPV6:
		nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);
		break;
	default:
		break;
	}

	flow->ip_proto = ip_proto;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		nhoff += poff;
		/* both 16-bit ports loaded as a single 32-bit word */
		flow->ports = load_word(skb, nhoff);
	}

	flow->thoff = (__u16) nhoff;

	return true;
}

/* Per-destination counters shared with user space (must match the
 * 'struct pair' definition in sockex2_user.c).
 */
struct pair {
	long packets;
	long bytes;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, __be32);
	__type(value, struct pair);
	__uint(max_entries, 1024);
} hash_map SEC(".maps");

SEC("socket2")
int bpf_prog2(struct __sk_buff *skb)
{
	struct flow_key_record flow = {};
	struct pair *value;
	u32 key;

	if (!flow_dissector(skb, &flow))
		return 0;

	key = flow.dst;
	value = bpf_map_lookup_elem(&hash_map, &key);
	if (value) {
		/* atomic adds: the program can run on multiple CPUs */
		__sync_fetch_and_add(&value->packets, 1);
		__sync_fetch_and_add(&value->bytes, skb->len);
	} else {
		struct pair val = {1, skb->len};

		bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
	}
	return 0;
}

char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/sockex2_kern.c
// SPDX-License-Identifier: GPL-2.0
/* tracex6_user.c: user-space driver for tracex6.bpf.c.
 *
 * For each CPU, forks a child pinned to that CPU, opens a perf counter,
 * stores its fd in the BPF perf_event_array, then triggers the two
 * kprobes so the BPF side reads the counter with bpf_perf_event_read()
 * and bpf_perf_event_read_value() and publishes the results via maps.
 */
#define _GNU_SOURCE

#include <assert.h>
#include <fcntl.h>
#include <linux/perf_event.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "perf-sys.h"

#define SAMPLE_PERIOD  0x7fffffffffffffffULL

/* counters, values, values2 */
static int map_fd[3];

/* Runs in a forked child: pin to 'cpu', open the perf event described
 * by 'attr', publish its fd to the BPF program, trigger both kprobes,
 * and exit(0) on success / exit(1) on failure.
 */
static void check_on_cpu(int cpu, struct perf_event_attr *attr)
{
	struct bpf_perf_event_value value2;
	int pmu_fd, error = 0;
	cpu_set_t set;
	__u64 value;

	/* Move to target CPU */
	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	assert(sched_setaffinity(0, sizeof(set), &set) == 0);
	/* Open perf event and attach to the perf_event_array */
	pmu_fd = sys_perf_event_open(attr, -1/*pid*/, cpu/*cpu*/,
				     -1/*group_fd*/, 0);
	if (pmu_fd < 0) {
		fprintf(stderr, "sys_perf_event_open failed on CPU %d\n", cpu);
		error = 1;
		goto on_exit;
	}
	assert(bpf_map_update_elem(map_fd[0], &cpu, &pmu_fd, BPF_ANY) == 0);
	assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0) == 0);
	/* Trigger the kprobe (htab_map_get_next_key); the NULL next_key
	 * makes the syscall fail, but the kprobe still fires.
	 */
	bpf_map_get_next_key(map_fd[1], &cpu, NULL);
	/* Check the value */
	if (bpf_map_lookup_elem(map_fd[1], &cpu, &value)) {
		fprintf(stderr, "Value missing for CPU %d\n", cpu);
		error = 1;
		goto on_exit;
	} else {
		fprintf(stderr, "CPU %d: %llu\n", cpu, value);
	}
	/* The above bpf_map_lookup_elem should trigger the second kprobe */
	if (bpf_map_lookup_elem(map_fd[2], &cpu, &value2)) {
		fprintf(stderr, "Value2 missing for CPU %d\n", cpu);
		error = 1;
		goto on_exit;
	} else {
		fprintf(stderr, "CPU %d: counter: %llu, enabled: %llu, running: %llu\n",
			cpu, value2.counter, value2.enabled, value2.running);
	}

on_exit:
	/* "|| error" keeps cleanup asserts from masking the first failure */
	assert(bpf_map_delete_elem(map_fd[0], &cpu) == 0 || error);
	assert(ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE, 0) == 0 || error);
	assert(close(pmu_fd) == 0 || error);
	assert(bpf_map_delete_elem(map_fd[1], &cpu) == 0 || error);
	exit(error);
}

/* Fork one child per CPU, run check_on_cpu() in each, and report if
 * any child exited non-zero.
 */
static void test_perf_event_array(struct perf_event_attr *attr,
				  const char *name)
{
	int i, status, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	pid_t pid[nr_cpus];
	int err = 0;

	printf("Test reading %s counters\n", name);

	for (i = 0; i < nr_cpus; i++) {
		pid[i] = fork();
		assert(pid[i] >= 0);
		if (pid[i] == 0) {
			check_on_cpu(i, attr);
			exit(1);	/* not reached: check_on_cpu exits */
		}
	}

	for (i = 0; i < nr_cpus; i++) {
		assert(waitpid(pid[i], &status, 0) == pid[i]);
		err |= status;
	}

	if (err)
		printf("Test: %s FAILED\n", name);
}

/* Exercise several perf event types through the BPF read helpers. */
static void test_bpf_perf_event(void)
{
	struct perf_event_attr attr_cycles = {
		.freq = 0,
		.sample_period = SAMPLE_PERIOD,
		.inherit = 0,
		.type = PERF_TYPE_HARDWARE,
		.read_format = 0,
		.sample_type = 0,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_event_attr attr_clock = {
		.freq = 0,
		.sample_period = SAMPLE_PERIOD,
		.inherit = 0,
		.type = PERF_TYPE_SOFTWARE,
		.read_format = 0,
		.sample_type = 0,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};
	struct perf_event_attr attr_raw = {
		.freq = 0,
		.sample_period = SAMPLE_PERIOD,
		.inherit = 0,
		.type = PERF_TYPE_RAW,
		.read_format = 0,
		.sample_type = 0,
		/* Intel Instruction Retired */
		.config = 0xc0,
	};
	struct perf_event_attr attr_l1d_load = {
		.freq = 0,
		.sample_period = SAMPLE_PERIOD,
		.inherit = 0,
		.type = PERF_TYPE_HW_CACHE,
		.read_format = 0,
		.sample_type = 0,
		.config =
			PERF_COUNT_HW_CACHE_L1D |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
	};
	struct perf_event_attr attr_llc_miss = {
		.freq = 0,
		.sample_period = SAMPLE_PERIOD,
		.inherit = 0,
		.type = PERF_TYPE_HW_CACHE,
		.read_format = 0,
		.sample_type = 0,
		.config =
			PERF_COUNT_HW_CACHE_LL |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
	};
	struct perf_event_attr attr_msr_tsc = {
		.freq = 0,
		.sample_period = 0,
		.inherit = 0,
		/* From /sys/bus/event_source/devices/msr/ */
		.type = 7,
		.read_format = 0,
		.sample_type = 0,
		.config = 0,
	};

	test_perf_event_array(&attr_cycles, "HARDWARE-cycles");
	test_perf_event_array(&attr_clock, "SOFTWARE-clock");
	test_perf_event_array(&attr_raw, "RAW-instruction-retired");
	test_perf_event_array(&attr_l1d_load, "HW_CACHE-L1D-load");

	/* below tests may fail in qemu */
	test_perf_event_array(&attr_llc_miss, "HW_CACHE-LLC-miss");
	test_perf_event_array(&attr_msr_tsc, "Dynamic-msr-tsc");
}

int main(int argc, char **argv)
{
	/* NOTE(review): fixed at 2 — matches the two programs in
	 * tracex6.bpf.c; would overflow if that file grew a third one.
	 */
	struct bpf_link *links[2];
	struct bpf_program *prog;
	struct bpf_object *obj;
	char filename[256];
	int i = 0;

	snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
	obj = bpf_object__open_file(filename, NULL);
	if (libbpf_get_error(obj)) {
		fprintf(stderr, "ERROR: opening BPF object file failed\n");
		return 0;
	}

	/* load BPF program */
	if (bpf_object__load(obj)) {
		fprintf(stderr, "ERROR: loading BPF object file failed\n");
		goto cleanup;
	}

	map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counters");
	map_fd[1] = bpf_object__find_map_fd_by_name(obj, "values");
	map_fd[2] = bpf_object__find_map_fd_by_name(obj, "values2");
	if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0) {
		fprintf(stderr, "ERROR: finding a map in obj file failed\n");
		goto cleanup;
	}

	bpf_object__for_each_program(prog, obj) {
		links[i] = bpf_program__attach(prog);
		if (libbpf_get_error(links[i])) {
			fprintf(stderr, "ERROR: bpf_program__attach failed\n");
			links[i] = NULL;
			goto cleanup;
		}
		i++;
	}

	test_bpf_perf_event();

cleanup:
	/* destroy only the links that were successfully attached */
	for (i--; i >= 0; i--)
		bpf_link__destroy(links[i]);

	bpf_object__close(obj);
	return 0;
}
linux-master
samples/bpf/tracex6_user.c
// SPDX-License-Identifier: GPL-2.0-only
/* offwaketime_user.c: user-space side of the off-wake-time sample.
 *
 * Attaches the BPF programs from offwaketime, sleeps for a configurable
 * delay (argv[1], default 1s), then dumps the collected waker/target
 * stack pairs with their counts in folded-stack format (suitable for
 * flame-graph tooling).
 */
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
#include <linux/perf_event.h>
#include <errno.h>
#include <stdbool.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include "trace_helpers.h"

#define PRINT_RAW_ADDR 0

/* counts, stackmap */
static int map_fd[2];

/* Print one kernel symbol (or nothing for a zero address) followed by
 * ';' — the folded-stack separator.
 */
static void print_ksym(__u64 addr)
{
	struct ksym *sym;

	if (!addr)
		return;
	sym = ksym_search(addr);
	if (!sym) {
		printf("ksym not found. Is kallsyms loaded?\n");
		return;
	}

	if (PRINT_RAW_ADDR)
		printf("%s/%llx;", sym->name, addr);
	else
		printf("%s;", sym->name);
}

#define TASK_COMM_LEN 16

/* Must match 'struct key_t' in the BPF program: waker/target comms plus
 * the two stackmap ids (or negative errors) for their stacks.
 */
struct key_t {
	char waker[TASK_COMM_LEN];
	char target[TASK_COMM_LEN];
	__u32 wret;
	__u32 tret;
};

/* Emit one folded line: target comm, target stack (leaf-last), a '-'
 * separator, waker stack (leaf-first), waker comm, and the count.
 */
static void print_stack(struct key_t *key, __u64 count)
{
	__u64 ip[PERF_MAX_STACK_DEPTH] = {};
	static bool warned;
	int i;

	printf("%s;", key->target);
	if (bpf_map_lookup_elem(map_fd[1], &key->tret, ip) != 0) {
		printf("---;");
	} else {
		/* reversed so the stack reads root-to-leaf */
		for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
			print_ksym(ip[i]);
	}
	printf("-;");
	if (bpf_map_lookup_elem(map_fd[1], &key->wret, ip) != 0) {
		printf("---;");
	} else {
		for (i = 0; i < PERF_MAX_STACK_DEPTH; i++)
			print_ksym(ip[i]);
	}
	printf(";%s %lld\n", key->waker, count);

	/* negative ids are bpf_get_stackid() errors stored in a __u32 */
	if ((key->tret == -EEXIST || key->wret == -EEXIST) && !warned) {
		printf("stackmap collisions seen. Consider increasing size\n");
		warned = true;
	} else if (((int)(key->tret) < 0 || (int)(key->wret) < 0)) {
		printf("err stackid %d %d\n", key->tret, key->wret);
	}
}

/* Walk the counts map and print every entry. */
static void print_stacks(int fd)
{
	struct key_t key = {}, next_key;
	__u64 value;

	while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
		bpf_map_lookup_elem(fd, &next_key, &value);
		print_stack(&next_key, value);
		key = next_key;
	}
}

/* SIGINT/SIGTERM handler: dump what was collected so far and exit. */
static void int_exit(int sig)
{
	print_stacks(map_fd[0]);
	exit(0);
}

int main(int argc, char **argv)
{
	struct bpf_object *obj = NULL;
	/* NOTE(review): sized for the two programs in the companion
	 * offwaketime BPF object; verify if that object changes.
	 */
	struct bpf_link *links[2];
	struct bpf_program *prog;
	int delay = 1, i = 0;
	char filename[256];

	if (load_kallsyms()) {
		printf("failed to process /proc/kallsyms\n");
		return 2;
	}

	snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
	obj = bpf_object__open_file(filename, NULL);
	if (libbpf_get_error(obj)) {
		fprintf(stderr, "ERROR: opening BPF object file failed\n");
		obj = NULL;
		goto cleanup;
	}

	/* load BPF program */
	if (bpf_object__load(obj)) {
		fprintf(stderr, "ERROR: loading BPF object file failed\n");
		goto cleanup;
	}

	map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counts");
	map_fd[1] = bpf_object__find_map_fd_by_name(obj, "stackmap");
	if (map_fd[0] < 0 || map_fd[1] < 0) {
		fprintf(stderr, "ERROR: finding a map in obj file failed\n");
		goto cleanup;
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	bpf_object__for_each_program(prog, obj) {
		links[i] = bpf_program__attach(prog);
		if (libbpf_get_error(links[i])) {
			fprintf(stderr, "ERROR: bpf_program__attach failed\n");
			links[i] = NULL;
			goto cleanup;
		}
		i++;
	}

	if (argc > 1)
		delay = atoi(argv[1]);
	sleep(delay);
	print_stacks(map_fd[0]);

cleanup:
	/* destroy only the links that were successfully attached */
	for (i--; i >= 0; i--)
		bpf_link__destroy(links[i]);

	bpf_object__close(obj);
	return 0;
}
linux-master
samples/bpf/offwaketime_user.c
/* Copyright 2016 Netflix, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #include <linux/ptrace.h> #include <uapi/linux/bpf.h> #include <uapi/linux/bpf_perf_event.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #define MAX_IPS 8192 struct { __uint(type, BPF_MAP_TYPE_HASH); __type(key, u64); __type(value, u32); __uint(max_entries, MAX_IPS); } ip_map SEC(".maps"); SEC("perf_event") int do_sample(struct bpf_perf_event_data *ctx) { u64 ip; u32 *value, init_val = 1; ip = PT_REGS_IP(&ctx->regs); value = bpf_map_lookup_elem(&ip_map, &ip); if (value) *value += 1; else /* E2BIG not tested for this example only */ bpf_map_update_elem(&ip_map, &ip, &init_val, BPF_NOEXIST); return 0; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/sampleip_kern.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Sample Host Bandwidth Manager (HBM) BPF program.
 *
 * A cgroup skb BPF egress program to limit cgroup output bandwidth.
 * It uses a modified virtual token bucket queue to limit average
 * egress bandwidth. The implementation uses credits instead of tokens.
 * Negative credits imply that queueing would have happened (this is
 * a virtual queue, so no queueing is done by it. However, queueing may
 * occur at the actual qdisc (which is not used for rate limiting).
 *
 * This implementation uses 3 thresholds, one to start marking packets and
 * the other two to drop packets:
 *                                   CREDIT
 *      - <--------------------------|------------------------> +
 *            |        |             0
 *            |     Large pkt
 *            |    drop thresh
 *       Small pkt drop         Mark threshold
 *          thresh
 *
 * The effect of marking depends on the type of packet:
 * a) If the packet is ECN enabled and it is a TCP packet, then the packet
 *    is ECN marked.
 * b) If the packet is a TCP packet, then we probabilistically call tcp_cwr
 *    to reduce the congestion window. The current implementation uses a linear
 *    distribution (0% probability at marking threshold, 100% probability
 *    at drop threshold).
 * c) If the packet is not a TCP packet, then it is dropped.
 *
 * If the credit is below the drop threshold, the packet is dropped. If it
 * is a TCP packet, then it also calls tcp_cwr since packets dropped by
 * a cgroup skb BPF program do not automatically trigger a call to
 * tcp_cwr in the current kernel code.
 *
 * This BPF program actually uses 2 drop thresholds, one threshold
 * for larger packets (>= 120 bytes) and another for smaller packets. This
 * protects smaller packets such as SYNs, ACKs, etc.
 *
 * The default bandwidth limit is set at 1Gbps but this can be changed by
 * a user program through a shared BPF map. In addition, by default this BPF
 * program does not limit connections using loopback. This behavior can be
 * overwritten by the user program. There is also an option to calculate
 * some statistics, such as percent of packets marked or dropped, which
 * a user program, such as hbm, can access.
 */

#include "hbm_kern.h"

SEC("cgroup_skb/egress")
int _hbm_out_cg(struct __sk_buff *skb)
{
	long long delta = 0, delta_send;
	unsigned long long curtime, sendtime;
	struct hbm_queue_stats *qsp = NULL;
	unsigned int queue_index = 0;
	bool congestion_flag = false;
	bool ecn_ce_flag = false;
	struct hbm_pkt_info pkti = {};
	struct hbm_vqueue *qdp;
	bool drop_flag = false;
	bool cwr_flag = false;
	int len = skb->len;
	int rv = ALLOW_PKT;

	qsp = bpf_map_lookup_elem(&queue_stats, &queue_index);

	// Check if we should ignore loopback traffic
	if (qsp != NULL && !qsp->loopback && (skb->ifindex == 1))
		return ALLOW_PKT;

	hbm_get_pkt_info(skb, &pkti);

	// We may want to account for the length of headers in len
	// calculation, like ETH header + overhead, specially if it
	// is a gso packet. But I am not doing it right now.
	qdp = bpf_get_local_storage(&queue_state, 0);
	if (!qdp)
		return ALLOW_PKT;
	if (qdp->lasttime == 0)
		hbm_init_edt_vqueue(qdp, 1024);
	curtime = bpf_ktime_get_ns();

	// Begin critical section
	bpf_spin_lock(&qdp->lock);
	delta = qdp->lasttime - curtime;
	// bound bursts to 100us
	if (delta < -BURST_SIZE_NS) {
		// negative delta is a credit that allows bursts
		qdp->lasttime = curtime - BURST_SIZE_NS;
		delta = -BURST_SIZE_NS;
	}
	sendtime = qdp->lasttime;
	delta_send = BYTES_TO_NS(len, qdp->rate);
	__sync_add_and_fetch(&(qdp->lasttime), delta_send);
	bpf_spin_unlock(&qdp->lock);
	// End critical section

	// Set EDT of packet
	skb->tstamp = sendtime;

	// Check if we should update rate
	if (qsp != NULL && (qsp->rate * 128) != qdp->rate)
		qdp->rate = qsp->rate * 128;

	// Set flags (drop, congestion, cwr)
	// last packet will be sent in the future, bound latency
	if (delta > DROP_THRESH_NS || (delta > LARGE_PKT_DROP_THRESH_NS &&
				       len > LARGE_PKT_THRESH)) {
		drop_flag = true;
		if (pkti.is_tcp && pkti.ecn == 0)
			cwr_flag = true;
	} else if (delta > MARK_THRESH_NS) {
		if (pkti.is_tcp)
			congestion_flag = true;
		else
			drop_flag = true;
	}

	if (congestion_flag) {
		if (bpf_skb_ecn_set_ce(skb)) {
			ecn_ce_flag = true;
		} else {
			if (pkti.is_tcp) {
				unsigned int rand = bpf_get_prandom_u32();

				// linear probability: 0% at mark thresh,
				// 100% at drop thresh
				if (delta >= MARK_THRESH_NS +
				    (rand % MARK_REGION_SIZE_NS)) {
					// Do congestion control
					cwr_flag = true;
				}
			} else if (len > LARGE_PKT_THRESH) {
				// Problem if too many small packets?
				drop_flag = true;
				congestion_flag = false;
			}
		}
	}

	// never drop a flow down to its last in-flight packet
	if (pkti.is_tcp && drop_flag && pkti.packets_out <= 1) {
		drop_flag = false;
		cwr_flag = true;
		congestion_flag = false;
	}

	if (qsp != NULL && qsp->no_cn)
		cwr_flag = false;

	hbm_update_stats(qsp, len, curtime, congestion_flag, drop_flag,
			 cwr_flag, ecn_ce_flag, &pkti, (int) delta);

	if (drop_flag) {
		// give the virtual-queue credit back for the dropped pkt
		__sync_add_and_fetch(&(qdp->lasttime), -delta_send);
		rv = DROP_PKT;
	}

	if (cwr_flag)
		rv |= CWR;
	return rv;
}

char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/hbm_edt_kern.c
/* Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #define KBUILD_MODNAME "foo" #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/in.h> #include <linux/tcp.h> #include <linux/udp.h> #include <uapi/linux/bpf.h> #include <net/ip.h> #include <bpf/bpf_helpers.h> #define DEFAULT_PKTGEN_UDP_PORT 9 /* copy of 'struct ethhdr' without __packed */ struct eth_hdr { unsigned char h_dest[ETH_ALEN]; unsigned char h_source[ETH_ALEN]; unsigned short h_proto; }; SEC("simple") int handle_ingress(struct __sk_buff *skb) { void *data = (void *)(long)skb->data; struct eth_hdr *eth = data; struct iphdr *iph = data + sizeof(*eth); struct udphdr *udp = data + sizeof(*eth) + sizeof(*iph); void *data_end = (void *)(long)skb->data_end; /* single length check */ if (data + sizeof(*eth) + sizeof(*iph) + sizeof(*udp) > data_end) return 0; if (eth->h_proto != htons(ETH_P_IP)) return 0; if (iph->protocol != IPPROTO_UDP || iph->ihl != 5) return 0; if (ip_is_fragment(iph)) return 0; if (udp->dest == htons(DEFAULT_PKTGEN_UDP_PORT)) return TC_ACT_SHOT; return 0; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/parse_simple.c
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <assert.h> #include <linux/bpf.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #include "sock_example.h" #include <unistd.h> #include <arpa/inet.h> struct pair { __u64 packets; __u64 bytes; }; int main(int ac, char **argv) { struct bpf_program *prog; struct bpf_object *obj; int map_fd, prog_fd; char filename[256]; int i, sock, err; FILE *f; snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) return 1; prog = bpf_object__next_program(obj, NULL); bpf_program__set_type(prog, BPF_PROG_TYPE_SOCKET_FILTER); err = bpf_object__load(obj); if (err) return 1; prog_fd = bpf_program__fd(prog); map_fd = bpf_object__find_map_fd_by_name(obj, "hash_map"); sock = open_raw_sock("lo"); assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, sizeof(prog_fd)) == 0); f = popen("ping -4 -c5 localhost", "r"); (void) f; for (i = 0; i < 5; i++) { int key = 0, next_key; struct pair value; while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) { bpf_map_lookup_elem(map_fd, &next_key, &value); printf("ip %s bytes %lld packets %lld\n", inet_ntoa((struct in_addr){htonl(next_key)}), value.bytes, value.packets); key = next_key; } sleep(1); } return 0; }
linux-master
samples/bpf/sockex2_user.c
/* Copyright (C) 2017 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
/* xdp_router_ipv4.bpf.c: XDP IPv4 forwarder. Routes via an exact-match
 * table, an LPM trie plus ARP table (with default-gateway fallback),
 * redirecting matched packets out through the 'tx_port' devmap.
 */
#include "vmlinux.h"
#include "xdp_sample.bpf.h"
#include "xdp_sample_shared.h"

#define ETH_ALEN	6
#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8

/* Value stored in the LPM trie for each route. */
struct trie_value {
	__u8 prefix[4];
	__be64 value;		/* source MAC to use when forwarding */
	int ifindex;
	int metric;
	__be32 gw;		/* gateway, 0 if directly connected */
};

/* Key for lpm_trie */
union key_4 {
	u32 b32[2];		/* b32[0] = prefix length in bits */
	u8 b8[8];		/* b8[4..7] = IPv4 address bytes */
};

struct arp_entry {
	__be64 mac;
	__be32 dst;
};

struct direct_map {
	struct arp_entry arp;
	int ifindex;
	__be64 mac;
};

/* Map for trie implementation */
struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(key_size, 8);
	__uint(value_size, sizeof(struct trie_value));
	__uint(max_entries, 50);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} lpm_map SEC(".maps");

/* Map for ARP table */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, __be32);
	__type(value, __be64);
	__uint(max_entries, 50);
} arp_table SEC(".maps");

/* Map to keep the exact match entries in the route table */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, __be32);
	__type(value, struct direct_map);
	__uint(max_entries, 50);
} exact_match SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
	__uint(max_entries, 100);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_router_ipv4_prog(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct ethhdr *eth = data;
	u64 nh_off = sizeof(*eth);
	struct datarec *rec;
	__be16 h_proto;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&rx_cnt, &key);
	if (rec)
		NO_TEAR_INC(rec->processed);

	if (data + nh_off > data_end)
		goto drop;

	h_proto = eth->h_proto;
	/* unwrap a single VLAN (802.1Q or 802.1AD) tag, if present */
	if (h_proto == bpf_htons(ETH_P_8021Q) ||
	    h_proto == bpf_htons(ETH_P_8021AD)) {
		struct vlan_hdr *vhdr;

		vhdr = data + nh_off;
		nh_off += sizeof(struct vlan_hdr);
		if (data + nh_off > data_end)
			goto drop;

		h_proto = vhdr->h_vlan_encapsulated_proto;
	}

	switch (bpf_ntohs(h_proto)) {
	case ETH_P_ARP:
		/* let the kernel stack handle ARP */
		if (rec)
			NO_TEAR_INC(rec->xdp_pass);
		return XDP_PASS;
	case ETH_P_IP: {
		struct iphdr *iph = data + nh_off;
		struct direct_map *direct_entry;
		__be64 *dest_mac, *src_mac;
		int forward_to;

		if (iph + 1 > data_end)
			goto drop;

		direct_entry = bpf_map_lookup_elem(&exact_match, &iph->daddr);

		/* Check for exact match, this would give a faster lookup */
		if (direct_entry && direct_entry->mac &&
		    direct_entry->arp.mac) {
			src_mac = &direct_entry->mac;
			dest_mac = &direct_entry->arp.mac;
			forward_to = direct_entry->ifindex;
		} else {
			struct trie_value *prefix_value;
			union key_4 key4;

			/* Look up in the trie for lpm */
			key4.b32[0] = 32;	/* full /32 key */
			key4.b8[4] = iph->daddr & 0xff;
			key4.b8[5] = (iph->daddr >> 8) & 0xff;
			key4.b8[6] = (iph->daddr >> 16) & 0xff;
			key4.b8[7] = (iph->daddr >> 24) & 0xff;

			prefix_value = bpf_map_lookup_elem(&lpm_map, &key4);
			if (!prefix_value)
				goto drop;

			forward_to = prefix_value->ifindex;
			src_mac = &prefix_value->value;
			/* NOTE(review): address of a struct member is
			 * never NULL — this check is dead code.
			 */
			if (!src_mac)
				goto drop;

			dest_mac = bpf_map_lookup_elem(&arp_table, &iph->daddr);
			if (!dest_mac) {
				if (!prefix_value->gw)
					goto drop;

				dest_mac = bpf_map_lookup_elem(&arp_table,
							       &prefix_value->gw);
				if (!dest_mac) {
					/* Forward the packet to the kernel in
					 * order to trigger ARP discovery for
					 * the default gw.
					 */
					if (rec)
						NO_TEAR_INC(rec->xdp_pass);
					return XDP_PASS;
				}
			}
		}

		if (src_mac && dest_mac) {
			int ret;

			/* rewrite the L2 header and redirect */
			__builtin_memcpy(eth->h_dest, dest_mac, ETH_ALEN);
			__builtin_memcpy(eth->h_source, src_mac, ETH_ALEN);

			ret = bpf_redirect_map(&tx_port, forward_to, 0);
			if (ret == XDP_REDIRECT) {
				if (rec)
					NO_TEAR_INC(rec->xdp_redirect);
				return ret;
			}
		}
	}
	/* intentional fallthrough: failed redirects drop below */
	default:
		break;
	}
drop:
	if (rec)
		NO_TEAR_INC(rec->xdp_drop);
	return XDP_DROP;
}

char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/xdp_router_ipv4.bpf.c
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2016 Facebook */ #include <linux/unistd.h> #include <linux/bpf.h> #include <stdio.h> #include <stdint.h> #include <unistd.h> #include <string.h> #include <errno.h> #include <fcntl.h> #include <bpf/bpf.h> static void usage(void) { printf("Usage: test_cgrp2_array_pin [...]\n"); printf(" -F <file> File to pin an BPF cgroup array\n"); printf(" -U <file> Update an already pinned BPF cgroup array\n"); printf(" -v <value> Full path of the cgroup2\n"); printf(" -h Display this help\n"); } int main(int argc, char **argv) { const char *pinned_file = NULL, *cg2 = NULL; int create_array = 1; int array_key = 0; int array_fd = -1; int cg2_fd = -1; int ret = -1; int opt; while ((opt = getopt(argc, argv, "F:U:v:")) != -1) { switch (opt) { /* General args */ case 'F': pinned_file = optarg; break; case 'U': pinned_file = optarg; create_array = 0; break; case 'v': cg2 = optarg; break; default: usage(); goto out; } } if (!cg2 || !pinned_file) { usage(); goto out; } cg2_fd = open(cg2, O_RDONLY); if (cg2_fd < 0) { fprintf(stderr, "open(%s,...): %s(%d)\n", cg2, strerror(errno), errno); goto out; } if (create_array) { array_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_ARRAY, NULL, sizeof(uint32_t), sizeof(uint32_t), 1, NULL); if (array_fd < 0) { fprintf(stderr, "bpf_create_map(BPF_MAP_TYPE_CGROUP_ARRAY,...): %s(%d)\n", strerror(errno), errno); goto out; } } else { array_fd = bpf_obj_get(pinned_file); if (array_fd < 0) { fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n", pinned_file, strerror(errno), errno); goto out; } } ret = bpf_map_update_elem(array_fd, &array_key, &cg2_fd, 0); if (ret) { perror("bpf_map_update_elem"); goto out; } if (create_array) { ret = bpf_obj_pin(array_fd, pinned_file); if (ret) { fprintf(stderr, "bpf_obj_pin(..., %s): %s(%d)\n", pinned_file, strerror(errno), errno); goto out; } } out: if (array_fd != -1) close(array_fd); if (cg2_fd != -1) close(cg2_fd); return ret; }
linux-master
samples/bpf/test_cgrp2_array_pin.c
// SPDX-License-Identifier: GPL-2.0 #include <uapi/linux/unistd.h> #include <linux/kbuild.h> #define SYSNR(_NR) DEFINE(SYS ## _NR, _NR) void syscall_defines(void) { COMMENT("Linux system call numbers."); SYSNR(__NR_write); SYSNR(__NR_read); #ifdef __NR_mmap2 SYSNR(__NR_mmap2); #endif #ifdef __NR_mmap SYSNR(__NR_mmap); #endif }
linux-master
samples/bpf/syscall_nrs.c
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2017 Facebook */ #include <uapi/linux/bpf.h> #include <bpf/bpf_helpers.h> struct syscalls_enter_open_args { unsigned long long unused; long syscall_nr; long filename_ptr; long flags; long mode; }; struct syscalls_exit_open_args { unsigned long long unused; long syscall_nr; long ret; }; struct { __uint(type, BPF_MAP_TYPE_ARRAY); __type(key, u32); __type(value, u32); __uint(max_entries, 1); } enter_open_map SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_ARRAY); __type(key, u32); __type(value, u32); __uint(max_entries, 1); } exit_open_map SEC(".maps"); static __always_inline void count(void *map) { u32 key = 0; u32 *value, init_val = 1; value = bpf_map_lookup_elem(map, &key); if (value) *value += 1; else bpf_map_update_elem(map, &key, &init_val, BPF_NOEXIST); } #if !defined(__aarch64__) SEC("tracepoint/syscalls/sys_enter_open") int trace_enter_open(struct syscalls_enter_open_args *ctx) { count(&enter_open_map); return 0; } #endif SEC("tracepoint/syscalls/sys_enter_openat") int trace_enter_open_at(struct syscalls_enter_open_args *ctx) { count(&enter_open_map); return 0; } SEC("tracepoint/syscalls/sys_enter_openat2") int trace_enter_open_at2(struct syscalls_enter_open_args *ctx) { count(&enter_open_map); return 0; } #if !defined(__aarch64__) SEC("tracepoint/syscalls/sys_exit_open") int trace_enter_exit(struct syscalls_exit_open_args *ctx) { count(&exit_open_map); return 0; } #endif SEC("tracepoint/syscalls/sys_exit_openat") int trace_enter_exit_at(struct syscalls_exit_open_args *ctx) { count(&exit_open_map); return 0; } SEC("tracepoint/syscalls/sys_exit_openat2") int trace_enter_exit_at2(struct syscalls_exit_open_args *ctx) { count(&exit_open_map); return 0; }
linux-master
samples/bpf/syscall_tp_kern.c
/* Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #define KBUILD_MODNAME "foo" #include <uapi/linux/bpf.h> #include <uapi/linux/if_ether.h> #include <uapi/linux/if_packet.h> #include <uapi/linux/ip.h> #include <uapi/linux/ipv6.h> #include <uapi/linux/in.h> #include <uapi/linux/tcp.h> #include <uapi/linux/filter.h> #include <uapi/linux/pkt_cls.h> #include <net/ipv6.h> #include <bpf/bpf_helpers.h> #define _htonl __builtin_bswap32 #define PIN_GLOBAL_NS 2 struct bpf_elf_map { __u32 type; __u32 size_key; __u32 size_value; __u32 max_elem; __u32 flags; __u32 id; __u32 pinning; }; /* copy of 'struct ethhdr' without __packed */ struct eth_hdr { unsigned char h_dest[ETH_ALEN]; unsigned char h_source[ETH_ALEN]; unsigned short h_proto; }; struct bpf_elf_map SEC("maps") tun_iface = { .type = BPF_MAP_TYPE_ARRAY, .size_key = sizeof(int), .size_value = sizeof(int), .pinning = PIN_GLOBAL_NS, .max_elem = 1, }; static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr) { if (eth_proto == htons(ETH_P_IP)) return (_htonl(0xffffff00) & daddr) == _htonl(0x0a0a0100); else if (eth_proto == htons(ETH_P_IPV6)) return (daddr == _htonl(0x2401face)); return false; } SEC("l2_to_iptun_ingress_forward") int _l2_to_iptun_ingress_forward(struct __sk_buff *skb) { struct bpf_tunnel_key tkey = {}; void *data = (void *)(long)skb->data; struct eth_hdr *eth = data; void *data_end = (void *)(long)skb->data_end; int key = 0, *ifindex; int ret; if (data + sizeof(*eth) > data_end) return TC_ACT_OK; ifindex = bpf_map_lookup_elem(&tun_iface, &key); if (!ifindex) return TC_ACT_OK; if (eth->h_proto == htons(ETH_P_IP)) { char fmt4[] = "ingress forward to ifindex:%d daddr4:%x\n"; struct iphdr *iph = data + sizeof(*eth); if (data + sizeof(*eth) + sizeof(*iph) > data_end) return TC_ACT_OK; if (iph->protocol != IPPROTO_IPIP) return 
TC_ACT_OK; bpf_trace_printk(fmt4, sizeof(fmt4), *ifindex, _htonl(iph->daddr)); return bpf_redirect(*ifindex, BPF_F_INGRESS); } else if (eth->h_proto == htons(ETH_P_IPV6)) { char fmt6[] = "ingress forward to ifindex:%d daddr6:%x::%x\n"; struct ipv6hdr *ip6h = data + sizeof(*eth); if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) return TC_ACT_OK; if (ip6h->nexthdr != IPPROTO_IPIP && ip6h->nexthdr != IPPROTO_IPV6) return TC_ACT_OK; bpf_trace_printk(fmt6, sizeof(fmt6), *ifindex, _htonl(ip6h->daddr.s6_addr32[0]), _htonl(ip6h->daddr.s6_addr32[3])); return bpf_redirect(*ifindex, BPF_F_INGRESS); } return TC_ACT_OK; } SEC("l2_to_iptun_ingress_redirect") int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb) { struct bpf_tunnel_key tkey = {}; void *data = (void *)(long)skb->data; struct eth_hdr *eth = data; void *data_end = (void *)(long)skb->data_end; int key = 0, *ifindex; int ret; if (data + sizeof(*eth) > data_end) return TC_ACT_OK; ifindex = bpf_map_lookup_elem(&tun_iface, &key); if (!ifindex) return TC_ACT_OK; if (eth->h_proto == htons(ETH_P_IP)) { char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; struct iphdr *iph = data + sizeof(*eth); __be32 daddr = iph->daddr; if (data + sizeof(*eth) + sizeof(*iph) > data_end) return TC_ACT_OK; if (!is_vip_addr(eth->h_proto, daddr)) return TC_ACT_OK; bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(daddr), *ifindex); } else { return TC_ACT_OK; } tkey.tunnel_id = 10000; tkey.tunnel_ttl = 64; tkey.remote_ipv4 = 0x0a020166; /* 10.2.1.102 */ bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0); return bpf_redirect(*ifindex, 0); } SEC("l2_to_ip6tun_ingress_redirect") int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb) { struct bpf_tunnel_key tkey = {}; void *data = (void *)(long)skb->data; struct eth_hdr *eth = data; void *data_end = (void *)(long)skb->data_end; int key = 0, *ifindex; if (data + sizeof(*eth) > data_end) return TC_ACT_OK; ifindex = bpf_map_lookup_elem(&tun_iface, &key); if (!ifindex) return TC_ACT_OK; if 
(eth->h_proto == htons(ETH_P_IP)) { char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; struct iphdr *iph = data + sizeof(*eth); if (data + sizeof(*eth) + sizeof(*iph) > data_end) return TC_ACT_OK; if (!is_vip_addr(eth->h_proto, iph->daddr)) return TC_ACT_OK; bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(iph->daddr), *ifindex); } else if (eth->h_proto == htons(ETH_P_IPV6)) { char fmt6[] = "e/ingress redirect daddr6:%x to ifindex:%d\n"; struct ipv6hdr *ip6h = data + sizeof(*eth); if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) return TC_ACT_OK; if (!is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) return TC_ACT_OK; bpf_trace_printk(fmt6, sizeof(fmt6), _htonl(ip6h->daddr.s6_addr32[0]), *ifindex); } else { return TC_ACT_OK; } tkey.tunnel_id = 10000; tkey.tunnel_ttl = 64; /* 2401:db02:0:0:0:0:0:66 */ tkey.remote_ipv6[0] = _htonl(0x2401db02); tkey.remote_ipv6[1] = 0; tkey.remote_ipv6[2] = 0; tkey.remote_ipv6[3] = _htonl(0x00000066); bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6); return bpf_redirect(*ifindex, 0); } SEC("drop_non_tun_vip") int _drop_non_tun_vip(struct __sk_buff *skb) { struct bpf_tunnel_key tkey = {}; void *data = (void *)(long)skb->data; struct eth_hdr *eth = data; void *data_end = (void *)(long)skb->data_end; if (data + sizeof(*eth) > data_end) return TC_ACT_OK; if (eth->h_proto == htons(ETH_P_IP)) { struct iphdr *iph = data + sizeof(*eth); if (data + sizeof(*eth) + sizeof(*iph) > data_end) return TC_ACT_OK; if (is_vip_addr(eth->h_proto, iph->daddr)) return TC_ACT_SHOT; } else if (eth->h_proto == htons(ETH_P_IPV6)) { struct ipv6hdr *ip6h = data + sizeof(*eth); if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) return TC_ACT_OK; if (is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) return TC_ACT_SHOT; } return TC_ACT_OK; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/tc_l2_redirect_kern.c
/* tracex6 BPF side: read a perf-event hardware counter from BPF context.
 *
 * Two probes demonstrate the two counter-reading helpers:
 *  - bpf_prog1 uses bpf_perf_event_read() and stores the raw u64 reading
 *    in "values", keyed by CPU id.
 *  - bpf_prog2 uses bpf_perf_event_read_value() and stores the scaled
 *    reading (value + enabled/running times) in "values2".
 * Userspace installs one counter fd per CPU in "counters".
 */
#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

/* Perf event array: slot per CPU, populated with counter fds by userspace. */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(u32));
	__uint(max_entries, 64);
} counters SEC(".maps");

/* Raw counter value per CPU, written by bpf_prog1. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, u64);
	__uint(max_entries, 64);
} values SEC(".maps");

/* Scaled counter reading per CPU, written by bpf_prog2. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct bpf_perf_event_value);
	__uint(max_entries, 64);
} values2 SEC(".maps");

SEC("kprobe/htab_map_get_next_key")
int bpf_prog1(struct pt_regs *ctx)
{
	u32 key = bpf_get_smp_processor_id();
	u64 count, *val;
	s64 error;

	count = bpf_perf_event_read(&counters, key);
	/* bpf_perf_event_read() reports failure as a negative errno encoded
	 * in the returned u64; drop readings in the [-EINVAL, -ENOENT]
	 * range (-22..-2) instead of recording them as counter values.
	 */
	error = (s64)count;
	if (error <= -2 && error >= -22)
		return 0;

	val = bpf_map_lookup_elem(&values, &key);
	if (val)
		*val = count;
	else
		bpf_map_update_elem(&values, &key, &count, BPF_NOEXIST);

	return 0;
}

/*
 * Since *_map_lookup_elem can't be expected to trigger bpf programs
 * due to potential deadlocks (bpf_disable_instrumentation), this bpf
 * program will be attached to bpf_map_copy_value (which is called
 * from map_lookup_elem) and will only filter the hashtable type.
 */
SEC("kprobe/bpf_map_copy_value")
int BPF_KPROBE(bpf_prog2, struct bpf_map *map)
{
	u32 key = bpf_get_smp_processor_id();
	struct bpf_perf_event_value *val, buf;
	enum bpf_map_type type;
	int error;

	/* CO-RE read of map->map_type; only trace hash-map lookups. */
	type = BPF_CORE_READ(map, map_type);
	if (type != BPF_MAP_TYPE_HASH)
		return 0;

	error = bpf_perf_event_read_value(&counters, key, &buf, sizeof(buf));
	if (error)
		return 0;

	val = bpf_map_lookup_elem(&values2, &key);
	if (val)
		*val = buf;
	else
		bpf_map_update_elem(&values2, &key, &buf, BPF_NOEXIST);

	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/tracex6.bpf.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Sargun Dhillon <[email protected]> */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"

#define CGROUP_PATH		"/my-cgroup"

/* Loads the companion BPF object, attaches its "bpf_prog1" program, and
 * verifies bpf_current_task_under_cgroup() behaviour: while this process
 * sits inside CGROUP_PATH the helper records our PID in "perf_map"; after
 * we leave the cgroup it must not.
 * Returns 0 on success, 1 on failure (0 also on object-open failure,
 * matching the original sample).
 */
int main(int argc, char **argv)
{
	pid_t remote_pid, local_pid = getpid();
	int cg2 = -1, idx = 0, rc = 1;
	struct bpf_link *link = NULL;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char filename[256];
	int map_fd[2];

	snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
	obj = bpf_object__open_file(filename, NULL);
	if (libbpf_get_error(obj)) {
		fprintf(stderr, "ERROR: opening BPF object file failed\n");
		return 0;
	}

	prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
	if (!prog) {
		printf("finding a prog in obj file failed\n");
		goto cleanup;
	}

	/* load BPF program */
	if (bpf_object__load(obj)) {
		fprintf(stderr, "ERROR: loading BPF object file failed\n");
		goto cleanup;
	}

	map_fd[0] = bpf_object__find_map_fd_by_name(obj, "cgroup_map");
	map_fd[1] = bpf_object__find_map_fd_by_name(obj, "perf_map");
	if (map_fd[0] < 0 || map_fd[1] < 0) {
		fprintf(stderr, "ERROR: finding a map in obj file failed\n");
		goto cleanup;
	}

	link = bpf_program__attach(prog);
	if (libbpf_get_error(link)) {
		fprintf(stderr, "ERROR: bpf_program__attach failed\n");
		link = NULL;
		goto cleanup;
	}

	if (setup_cgroup_environment())
		goto err;

	cg2 = create_and_get_cgroup(CGROUP_PATH);

	if (cg2 < 0)
		goto err;

	/* Tell the BPF program which cgroup to match (array slot 0). */
	if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
		log_err("Adding target cgroup to map");
		goto err;
	}

	if (join_cgroup(CGROUP_PATH))
		goto err;

	/*
	 * The installed helper program catches the sync call, and should
	 * write our PID to the map.
	 */
	sync();
	bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid);

	if (local_pid != remote_pid) {
		fprintf(stderr,
			"BPF Helper didn't write correct PID to map, but: %d\n",
			remote_pid);
		goto err;
	}

	/* Verify the negative scenario; leave the cgroup */
	if (join_cgroup("/"))
		goto err;

	/* Reset the slot so a stale value can't mask a failure. */
	remote_pid = 0;
	bpf_map_update_elem(map_fd[1], &idx, &remote_pid, BPF_ANY);

	sync();
	bpf_map_lookup_elem(map_fd[1], &idx, &remote_pid);

	if (local_pid == remote_pid) {
		fprintf(stderr, "BPF cgroup negative test did not work\n");
		goto err;
	}

	rc = 0;

err:
	if (cg2 != -1)
		close(cg2);

	cleanup_cgroup_environment();

cleanup:
	bpf_link__destroy(link);
	bpf_object__close(obj);
	return rc;
}
linux-master
samples/bpf/test_current_task_under_cgroup_user.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * Example program for Host Bandwidth Managment * * This program loads a cgroup skb BPF program to enforce cgroup output * (egress) or input (ingress) bandwidth limits. * * USAGE: hbm [-d] [-l] [-n <id>] [-r <rate>] [-s] [-t <secs>] [-w] [-h] [prog] * Where: * -d Print BPF trace debug buffer * -l Also limit flows doing loopback * -n <#> To create cgroup \"/hbm#\" and attach prog * Default is /hbm1 * --no_cn Do not return cn notifications * -r <rate> Rate limit in Mbps * -s Get HBM stats (marked, dropped, etc.) * -t <time> Exit after specified seconds (default is 0) * -w Work conserving flag. cgroup can increase its bandwidth * beyond the rate limit specified while there is available * bandwidth. Current implementation assumes there is only * NIC (eth0), but can be extended to support multiple NICs. * Currrently only supported for egress. * -h Print this info * prog BPF program file name. 
Name defaults to hbm_out_kern.o */ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <sys/time.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <linux/unistd.h> #include <linux/compiler.h> #include <linux/bpf.h> #include <bpf/bpf.h> #include <getopt.h> #include "cgroup_helpers.h" #include "hbm.h" #include "bpf_util.h" #include <bpf/libbpf.h> bool outFlag = true; int minRate = 1000; /* cgroup rate limit in Mbps */ int rate = 1000; /* can grow if rate conserving is enabled */ int dur = 1; bool stats_flag; bool loopback_flag; bool debugFlag; bool work_conserving_flag; bool no_cn_flag; bool edt_flag; static void Usage(void); static void read_trace_pipe2(void); static void do_error(char *msg, bool errno_flag); #define TRACEFS "/sys/kernel/tracing/" static struct bpf_program *bpf_prog; static struct bpf_object *obj; static int queue_stats_fd; static void read_trace_pipe2(void) { int trace_fd; FILE *outf; char *outFname = "hbm_out.log"; trace_fd = open(TRACEFS "trace_pipe", O_RDONLY, 0); if (trace_fd < 0) { printf("Error opening trace_pipe\n"); return; } // Future support of ingress // if (!outFlag) // outFname = "hbm_in.log"; outf = fopen(outFname, "w"); if (outf == NULL) printf("Error creating %s\n", outFname); while (1) { static char buf[4097]; ssize_t sz; sz = read(trace_fd, buf, sizeof(buf) - 1); if (sz > 0) { buf[sz] = 0; puts(buf); if (outf != NULL) { fprintf(outf, "%s\n", buf); fflush(outf); } } } } static void do_error(char *msg, bool errno_flag) { if (errno_flag) printf("ERROR: %s, errno: %d\n", msg, errno); else printf("ERROR: %s\n", msg); exit(1); } static int prog_load(char *prog) { struct bpf_program *pos; const char *sec_name; obj = bpf_object__open_file(prog, NULL); if (libbpf_get_error(obj)) { printf("ERROR: opening BPF object file failed\n"); return 1; } /* load BPF program */ if (bpf_object__load(obj)) { printf("ERROR: loading BPF object file failed\n"); goto err; } 
bpf_object__for_each_program(pos, obj) { sec_name = bpf_program__section_name(pos); if (sec_name && !strcmp(sec_name, "cgroup_skb/egress")) { bpf_prog = pos; break; } } if (!bpf_prog) { printf("ERROR: finding a prog in obj file failed\n"); goto err; } queue_stats_fd = bpf_object__find_map_fd_by_name(obj, "queue_stats"); if (queue_stats_fd < 0) { printf("ERROR: finding a map in obj file failed\n"); goto err; } return 0; err: bpf_object__close(obj); return 1; } static int run_bpf_prog(char *prog, int cg_id) { struct hbm_queue_stats qstats = {0}; char cg_dir[100], cg_pin_path[100]; struct bpf_link *link = NULL; int key = 0; int cg1 = 0; int rc = 0; sprintf(cg_dir, "/hbm%d", cg_id); rc = prog_load(prog); if (rc != 0) return rc; if (setup_cgroup_environment()) { printf("ERROR: setting cgroup environment\n"); goto err; } cg1 = create_and_get_cgroup(cg_dir); if (!cg1) { printf("ERROR: create_and_get_cgroup\n"); goto err; } if (join_cgroup(cg_dir)) { printf("ERROR: join_cgroup\n"); goto err; } qstats.rate = rate; qstats.stats = stats_flag ? 1 : 0; qstats.loopback = loopback_flag ? 1 : 0; qstats.no_cn = no_cn_flag ? 
1 : 0; if (bpf_map_update_elem(queue_stats_fd, &key, &qstats, BPF_ANY)) { printf("ERROR: Could not update map element\n"); goto err; } if (!outFlag) bpf_program__set_expected_attach_type(bpf_prog, BPF_CGROUP_INET_INGRESS); link = bpf_program__attach_cgroup(bpf_prog, cg1); if (libbpf_get_error(link)) { fprintf(stderr, "ERROR: bpf_program__attach_cgroup failed\n"); goto err; } sprintf(cg_pin_path, "/sys/fs/bpf/hbm%d", cg_id); rc = bpf_link__pin(link, cg_pin_path); if (rc < 0) { printf("ERROR: bpf_link__pin failed: %d\n", rc); goto err; } if (work_conserving_flag) { struct timeval t0, t_last, t_new; FILE *fin; unsigned long long last_eth_tx_bytes, new_eth_tx_bytes; signed long long last_cg_tx_bytes, new_cg_tx_bytes; signed long long delta_time, delta_bytes, delta_rate; int delta_ms; #define DELTA_RATE_CHECK 10000 /* in us */ #define RATE_THRESHOLD 9500000000 /* 9.5 Gbps */ bpf_map_lookup_elem(queue_stats_fd, &key, &qstats); if (gettimeofday(&t0, NULL) < 0) do_error("gettimeofday failed", true); t_last = t0; fin = fopen("/sys/class/net/eth0/statistics/tx_bytes", "r"); if (fscanf(fin, "%llu", &last_eth_tx_bytes) != 1) do_error("fscanf fails", false); fclose(fin); last_cg_tx_bytes = qstats.bytes_total; while (true) { usleep(DELTA_RATE_CHECK); if (gettimeofday(&t_new, NULL) < 0) do_error("gettimeofday failed", true); delta_ms = (t_new.tv_sec - t0.tv_sec) * 1000 + (t_new.tv_usec - t0.tv_usec)/1000; if (delta_ms > dur * 1000) break; delta_time = (t_new.tv_sec - t_last.tv_sec) * 1000000 + (t_new.tv_usec - t_last.tv_usec); if (delta_time == 0) continue; t_last = t_new; fin = fopen("/sys/class/net/eth0/statistics/tx_bytes", "r"); if (fscanf(fin, "%llu", &new_eth_tx_bytes) != 1) do_error("fscanf fails", false); fclose(fin); printf(" new_eth_tx_bytes:%llu\n", new_eth_tx_bytes); bpf_map_lookup_elem(queue_stats_fd, &key, &qstats); new_cg_tx_bytes = qstats.bytes_total; delta_bytes = new_eth_tx_bytes - last_eth_tx_bytes; last_eth_tx_bytes = new_eth_tx_bytes; delta_rate = 
(delta_bytes * 8000000) / delta_time; printf("%5d - eth_rate:%.1fGbps cg_rate:%.3fGbps", delta_ms, delta_rate/1000000000.0, rate/1000.0); if (delta_rate < RATE_THRESHOLD) { /* can increase cgroup rate limit, but first * check if we are using the current limit. * Currently increasing by 6.25%, unknown * if that is the optimal rate. */ int rate_diff100; delta_bytes = new_cg_tx_bytes - last_cg_tx_bytes; last_cg_tx_bytes = new_cg_tx_bytes; delta_rate = (delta_bytes * 8000000) / delta_time; printf(" rate:%.3fGbps", delta_rate/1000000000.0); rate_diff100 = (((long long)rate)*1000000 - delta_rate) * 100 / (((long long) rate) * 1000000); printf(" rdiff:%d", rate_diff100); if (rate_diff100 <= 3) { rate += (rate >> 4); if (rate > RATE_THRESHOLD / 1000000) rate = RATE_THRESHOLD / 1000000; qstats.rate = rate; printf(" INC\n"); } else { printf("\n"); } } else { /* Need to decrease cgroup rate limit. * Currently decreasing by 12.5%, unknown * if that is optimal */ printf(" DEC\n"); rate -= (rate >> 3); if (rate < minRate) rate = minRate; qstats.rate = rate; } if (bpf_map_update_elem(queue_stats_fd, &key, &qstats, BPF_ANY)) do_error("update map element fails", false); } } else { sleep(dur); } // Get stats! 
if (stats_flag && bpf_map_lookup_elem(queue_stats_fd, &key, &qstats)) { char fname[100]; FILE *fout; if (!outFlag) sprintf(fname, "hbm.%d.in", cg_id); else sprintf(fname, "hbm.%d.out", cg_id); fout = fopen(fname, "w"); fprintf(fout, "id:%d\n", cg_id); fprintf(fout, "ERROR: Could not lookup queue_stats\n"); fclose(fout); } else if (stats_flag && qstats.lastPacketTime > qstats.firstPacketTime) { long long delta_us = (qstats.lastPacketTime - qstats.firstPacketTime)/1000; unsigned int rate_mbps = ((qstats.bytes_total - qstats.bytes_dropped) * 8 / delta_us); double percent_pkts, percent_bytes; char fname[100]; FILE *fout; int k; static const char *returnValNames[] = { "DROP_PKT", "ALLOW_PKT", "DROP_PKT_CWR", "ALLOW_PKT_CWR" }; #define RET_VAL_COUNT 4 // Future support of ingress // if (!outFlag) // sprintf(fname, "hbm.%d.in", cg_id); // else sprintf(fname, "hbm.%d.out", cg_id); fout = fopen(fname, "w"); fprintf(fout, "id:%d\n", cg_id); fprintf(fout, "rate_mbps:%d\n", rate_mbps); fprintf(fout, "duration:%.1f secs\n", (qstats.lastPacketTime - qstats.firstPacketTime) / 1000000000.0); fprintf(fout, "packets:%d\n", (int)qstats.pkts_total); fprintf(fout, "bytes_MB:%d\n", (int)(qstats.bytes_total / 1000000)); fprintf(fout, "pkts_dropped:%d\n", (int)qstats.pkts_dropped); fprintf(fout, "bytes_dropped_MB:%d\n", (int)(qstats.bytes_dropped / 1000000)); // Marked Pkts and Bytes percent_pkts = (qstats.pkts_marked * 100.0) / (qstats.pkts_total + 1); percent_bytes = (qstats.bytes_marked * 100.0) / (qstats.bytes_total + 1); fprintf(fout, "pkts_marked_percent:%6.2f\n", percent_pkts); fprintf(fout, "bytes_marked_percent:%6.2f\n", percent_bytes); // Dropped Pkts and Bytes percent_pkts = (qstats.pkts_dropped * 100.0) / (qstats.pkts_total + 1); percent_bytes = (qstats.bytes_dropped * 100.0) / (qstats.bytes_total + 1); fprintf(fout, "pkts_dropped_percent:%6.2f\n", percent_pkts); fprintf(fout, "bytes_dropped_percent:%6.2f\n", percent_bytes); // ECN CE markings percent_pkts = 
(qstats.pkts_ecn_ce * 100.0) / (qstats.pkts_total + 1); fprintf(fout, "pkts_ecn_ce:%6.2f (%d)\n", percent_pkts, (int)qstats.pkts_ecn_ce); // Average cwnd fprintf(fout, "avg cwnd:%d\n", (int)(qstats.sum_cwnd / (qstats.sum_cwnd_cnt + 1))); // Average rtt fprintf(fout, "avg rtt:%d\n", (int)(qstats.sum_rtt / (qstats.pkts_total + 1))); // Average credit if (edt_flag) fprintf(fout, "avg credit_ms:%.03f\n", (qstats.sum_credit / (qstats.pkts_total + 1.0)) / 1000000.0); else fprintf(fout, "avg credit:%d\n", (int)(qstats.sum_credit / (1500 * ((int)qstats.pkts_total ) + 1))); // Return values stats for (k = 0; k < RET_VAL_COUNT; k++) { percent_pkts = (qstats.returnValCount[k] * 100.0) / (qstats.pkts_total + 1); fprintf(fout, "%s:%6.2f (%d)\n", returnValNames[k], percent_pkts, (int)qstats.returnValCount[k]); } fclose(fout); } if (debugFlag) read_trace_pipe2(); goto cleanup; err: rc = 1; cleanup: bpf_link__destroy(link); bpf_object__close(obj); if (cg1 != -1) close(cg1); if (rc != 0) cleanup_cgroup_environment(); return rc; } static void Usage(void) { printf("This program loads a cgroup skb BPF program to enforce\n" "cgroup output (egress) bandwidth limits.\n\n" "USAGE: hbm [-o] [-d] [-l] [-n <id>] [--no_cn] [-r <rate>]\n" " [-s] [-t <secs>] [-w] [-h] [prog]\n" " Where:\n" " -o indicates egress direction (default)\n" " -d print BPF trace debug buffer\n" " --edt use fq's Earliest Departure Time\n" " -l also limit flows using loopback\n" " -n <#> to create cgroup \"/hbm#\" and attach prog\n" " Default is /hbm1\n" " --no_cn disable CN notifications\n" " -r <rate> Rate in Mbps\n" " -s Update HBM stats\n" " -t <time> Exit after specified seconds (default is 0)\n" " -w Work conserving flag. cgroup can increase\n" " bandwidth beyond the rate limit specified\n" " while there is available bandwidth. Current\n" " implementation assumes there is only eth0\n" " but can be extended to support multiple NICs\n" " -h print this info\n" " prog BPF program file name. 
Name defaults to\n" " hbm_out_kern.o\n"); } int main(int argc, char **argv) { char *prog = "hbm_out_kern.o"; int k; int cg_id = 1; char *optstring = "iodln:r:st:wh"; struct option loptions[] = { {"no_cn", 0, NULL, 1}, {"edt", 0, NULL, 2}, {NULL, 0, NULL, 0} }; while ((k = getopt_long(argc, argv, optstring, loptions, NULL)) != -1) { switch (k) { case 1: no_cn_flag = true; break; case 2: prog = "hbm_edt_kern.o"; edt_flag = true; break; case'o': break; case 'd': debugFlag = true; break; case 'l': loopback_flag = true; break; case 'n': cg_id = atoi(optarg); break; case 'r': minRate = atoi(optarg) * 1.024; rate = minRate; break; case 's': stats_flag = true; break; case 't': dur = atoi(optarg); break; case 'w': work_conserving_flag = true; break; case '?': if (optopt == 'n' || optopt == 'r' || optopt == 't') fprintf(stderr, "Option -%c requires an argument.\n\n", optopt); case 'h': default: Usage(); return 0; } } if (optind < argc) prog = argv[optind]; printf("HBM prog: %s\n", prog != NULL ? prog : "NULL"); /* Use libbpf 1.0 API mode */ libbpf_set_strict_mode(LIBBPF_STRICT_ALL); return run_bpf_prog(prog, cg_id); }
linux-master
samples/bpf/hbm.c
// SPDX-License-Identifier: GPL-2.0 #include "vmlinux.h" #include "net_shared.h" #include <bpf/bpf_helpers.h> SEC("cgroup/sock") int bpf_prog1(struct bpf_sock *sk) { char fmt[] = "socket: family %d type %d protocol %d\n"; char fmt2[] = "socket: uid %u gid %u\n"; __u64 gid_uid = bpf_get_current_uid_gid(); __u32 uid = gid_uid & 0xffffffff; __u32 gid = gid_uid >> 32; bpf_trace_printk(fmt, sizeof(fmt), sk->family, sk->type, sk->protocol); bpf_trace_printk(fmt2, sizeof(fmt2), uid, gid); /* block AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6 sockets * ie., make ping6 fail */ if (sk->family == AF_INET6 && sk->type == SOCK_DGRAM && sk->protocol == IPPROTO_ICMPV6) return 0; return 1; } SEC("cgroup/sock") int bpf_prog2(struct bpf_sock *sk) { char fmt[] = "socket: family %d type %d protocol %d\n"; bpf_trace_printk(fmt, sizeof(fmt), sk->family, sk->type, sk->protocol); /* block AF_INET, SOCK_DGRAM, IPPROTO_ICMP sockets * ie., make ping fail */ if (sk->family == AF_INET && sk->type == SOCK_DGRAM && sk->protocol == IPPROTO_ICMP) return 0; return 1; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/sock_flags.bpf.c
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * BPF program to set initial receive window to 40 packets when using IPv6
 * and the first 5.5 bytes of the IPv6 addresses are not the same (in this
 * example that means both hosts are not the same datacenter).
 *
 * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include <linux/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#define DEBUG 1

SEC("sockops")
int bpf_rwnd(struct bpf_sock_ops *skops)
{
	int rv = -1;
	int op;

	/* For testing purposes, skip the rest of the program unless one
	 * of the port numbers is 55601.  Note skops->remote_port is in
	 * network byte order (hence the bpf_ntohl) while local_port is
	 * in host byte order.
	 */
	if (bpf_ntohl(skops->remote_port) !=
	    55601 && skops->local_port != 55601) {
		skops->reply = -1;
		return 1;
	}

	op = (int) skops->op;

#ifdef DEBUG
	bpf_printk("BPF command: %d\n", op);
#endif

	/* Check for RWND_INIT operation and IPv6 addresses */
	if (op == BPF_SOCK_OPS_RWND_INIT &&
		skops->family == AF_INET6) {

		/* If the first 5.5 bytes of the IPv6 address are not the same
		 * then both hosts are not in the same datacenter
		 * so use a larger initial advertized window (40 packets)
		 */
		if (skops->local_ip6[0] != skops->remote_ip6[0] ||
		    (bpf_ntohl(skops->local_ip6[1]) & 0xfffff000) !=
		    (bpf_ntohl(skops->remote_ip6[1]) & 0xfffff000))
			rv = 40;
	}

#ifdef DEBUG
	bpf_printk("Returning %d\n", rv);
#endif

	/* reply carries the window back to the kernel; -1 means "use
	 * the default".
	 */
	skops->reply = rv;
	return 1;
}
char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/tcp_rwnd_kern.c
// SPDX-License-Identifier: GPL-2.0 /* Refer to samples/bpf/tcp_bpf.readme for the instructions on * how to run this sample program. */ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> #define INTERVAL 1000000000ULL int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; struct { __u32 type; __u32 map_flags; int *key; __u64 *value; } bpf_next_dump SEC(".maps") = { .type = BPF_MAP_TYPE_SK_STORAGE, .map_flags = BPF_F_NO_PREALLOC, }; SEC("sockops") int _sockops(struct bpf_sock_ops *ctx) { struct bpf_tcp_sock *tcp_sk; struct bpf_sock *sk; __u64 *next_dump; __u64 now; switch (ctx->op) { case BPF_SOCK_OPS_TCP_CONNECT_CB: bpf_sock_ops_cb_flags_set(ctx, BPF_SOCK_OPS_RTT_CB_FLAG); return 1; case BPF_SOCK_OPS_RTT_CB: break; default: return 1; } sk = ctx->sk; if (!sk) return 1; next_dump = bpf_sk_storage_get(&bpf_next_dump, sk, 0, BPF_SK_STORAGE_GET_F_CREATE); if (!next_dump) return 1; now = bpf_ktime_get_ns(); if (now < *next_dump) return 1; tcp_sk = bpf_tcp_sock(sk); if (!tcp_sk) return 1; *next_dump = now + INTERVAL; bpf_printk("dsack_dups=%u delivered=%u\n", tcp_sk->dsack_dups, tcp_sk->delivered); bpf_printk("delivered_ce=%u icsk_retransmits=%u\n", tcp_sk->delivered_ce, tcp_sk->icsk_retransmits); return 1; }
linux-master
samples/bpf/tcp_dumpstats_kern.c
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com */ #include <stdio.h> #include <stdlib.h> #include <signal.h> #include <unistd.h> #include <stdbool.h> #include <string.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #include "bpf_util.h" #define SLOTS 100 static void clear_stats(int fd) { unsigned int nr_cpus = bpf_num_possible_cpus(); __u64 values[nr_cpus]; __u32 key; memset(values, 0, sizeof(values)); for (key = 0; key < SLOTS; key++) bpf_map_update_elem(fd, &key, values, BPF_ANY); } const char *color[] = { "\033[48;5;255m", "\033[48;5;252m", "\033[48;5;250m", "\033[48;5;248m", "\033[48;5;246m", "\033[48;5;244m", "\033[48;5;242m", "\033[48;5;240m", "\033[48;5;238m", "\033[48;5;236m", "\033[48;5;234m", "\033[48;5;232m", }; const int num_colors = ARRAY_SIZE(color); const char nocolor[] = "\033[00m"; const char *sym[] = { " ", " ", ".", ".", "*", "*", "o", "o", "O", "O", "#", "#", }; bool full_range = false; bool text_only = false; static void print_banner(void) { if (full_range) printf("|1ns |10ns |100ns |1us |10us |100us" " |1ms |10ms |100ms |1s |10s\n"); else printf("|1us |10us |100us |1ms |10ms " "|100ms |1s |10s\n"); } static void print_hist(int fd) { unsigned int nr_cpus = bpf_num_possible_cpus(); __u64 total_events = 0; long values[nr_cpus]; __u64 max_cnt = 0; __u64 cnt[SLOTS]; __u64 value; __u32 key; int i; for (key = 0; key < SLOTS; key++) { bpf_map_lookup_elem(fd, &key, values); value = 0; for (i = 0; i < nr_cpus; i++) value += values[i]; cnt[key] = value; total_events += value; if (value > max_cnt) max_cnt = value; } clear_stats(fd); for (key = full_range ? 
0 : 29; key < SLOTS; key++) { int c = num_colors * cnt[key] / (max_cnt + 1); if (text_only) printf("%s", sym[c]); else printf("%s %s", color[c], nocolor); } printf(" # %lld\n", total_events); } int main(int ac, char **argv) { struct bpf_link *links[2]; struct bpf_program *prog; struct bpf_object *obj; char filename[256]; int map_fd, i, j = 0; for (i = 1; i < ac; i++) { if (strcmp(argv[i], "-a") == 0) { full_range = true; } else if (strcmp(argv[i], "-t") == 0) { text_only = true; } else if (strcmp(argv[i], "-h") == 0) { printf("Usage:\n" " -a display wider latency range\n" " -t text only\n"); return 1; } } snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); return 0; } /* load BPF program */ if (bpf_object__load(obj)) { fprintf(stderr, "ERROR: loading BPF object file failed\n"); goto cleanup; } map_fd = bpf_object__find_map_fd_by_name(obj, "lat_map"); if (map_fd < 0) { fprintf(stderr, "ERROR: finding a map in obj file failed\n"); goto cleanup; } bpf_object__for_each_program(prog, obj) { links[j] = bpf_program__attach(prog); if (libbpf_get_error(links[j])) { fprintf(stderr, "ERROR: bpf_program__attach failed\n"); links[j] = NULL; goto cleanup; } j++; } printf(" heatmap of IO latency\n"); if (text_only) printf(" %s", sym[num_colors - 1]); else printf(" %s %s", color[num_colors - 1], nocolor); printf(" - many events with this latency\n"); if (text_only) printf(" %s", sym[0]); else printf(" %s %s", color[0], nocolor); printf(" - few events\n"); for (i = 0; ; i++) { if (i % 20 == 0) print_banner(); print_hist(map_fd); sleep(2); } cleanup: for (j--; j >= 0; j--) bpf_link__destroy(links[j]); bpf_object__close(obj); return 0; }
linux-master
samples/bpf/tracex3_user.c
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2016 Facebook */ #include <linux/bpf.h> #include <linux/if_link.h> #include <assert.h> #include <errno.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <net/if.h> #include <arpa/inet.h> #include <netinet/ether.h> #include <unistd.h> #include <time.h> #include <bpf/libbpf.h> #include <bpf/bpf.h> #include "bpf_util.h" #include "xdp_tx_iptunnel_common.h" #define STATS_INTERVAL_S 2U static int ifindex = -1; static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; static int rxcnt_map_fd; static __u32 prog_id; static void int_exit(int sig) { __u32 curr_prog_id = 0; if (ifindex > -1) { if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) { printf("bpf_xdp_query_id failed\n"); exit(1); } if (prog_id == curr_prog_id) bpf_xdp_detach(ifindex, xdp_flags, NULL); else if (!curr_prog_id) printf("couldn't find a prog id on a given iface\n"); else printf("program on interface changed, not removing\n"); } exit(0); } /* simple per-protocol drop counter */ static void poll_stats(unsigned int kill_after_s) { const unsigned int nr_protos = 256; unsigned int nr_cpus = bpf_num_possible_cpus(); time_t started_at = time(NULL); __u64 values[nr_cpus], prev[nr_protos][nr_cpus]; __u32 proto; int i; memset(prev, 0, sizeof(prev)); while (!kill_after_s || time(NULL) - started_at <= kill_after_s) { sleep(STATS_INTERVAL_S); for (proto = 0; proto < nr_protos; proto++) { __u64 sum = 0; assert(bpf_map_lookup_elem(rxcnt_map_fd, &proto, values) == 0); for (i = 0; i < nr_cpus; i++) sum += (values[i] - prev[proto][i]); if (sum) printf("proto %u: sum:%10llu pkts, rate:%10llu pkts/s\n", proto, sum, sum / STATS_INTERVAL_S); memcpy(prev[proto], values, sizeof(values)); } } } static void usage(const char *cmd) { printf("Start a XDP prog which encapsulates incoming packets\n" "in an IPv4/v6 header and XDP_TX it out. 
The dst <VIP:PORT>\n" "is used to select packets to encapsulate\n\n"); printf("Usage: %s [...]\n", cmd); printf(" -i <ifname|ifindex> Interface\n"); printf(" -a <vip-service-address> IPv4 or IPv6\n"); printf(" -p <vip-service-port> A port range (e.g. 433-444) is also allowed\n"); printf(" -s <source-ip> Used in the IPTunnel header\n"); printf(" -d <dest-ip> Used in the IPTunnel header\n"); printf(" -m <dest-MAC> Used in sending the IP Tunneled pkt\n"); printf(" -T <stop-after-X-seconds> Default: 0 (forever)\n"); printf(" -P <IP-Protocol> Default is TCP\n"); printf(" -S use skb-mode\n"); printf(" -N enforce native mode\n"); printf(" -F Force loading the XDP prog\n"); printf(" -h Display this help\n"); } static int parse_ipstr(const char *ipstr, unsigned int *addr) { if (inet_pton(AF_INET6, ipstr, addr) == 1) { return AF_INET6; } else if (inet_pton(AF_INET, ipstr, addr) == 1) { addr[1] = addr[2] = addr[3] = 0; return AF_INET; } fprintf(stderr, "%s is an invalid IP\n", ipstr); return AF_UNSPEC; } static int parse_ports(const char *port_str, int *min_port, int *max_port) { char *end; long tmp_min_port; long tmp_max_port; tmp_min_port = strtol(optarg, &end, 10); if (tmp_min_port < 1 || tmp_min_port > 65535) { fprintf(stderr, "Invalid port(s):%s\n", optarg); return 1; } if (*end == '-') { end++; tmp_max_port = strtol(end, NULL, 10); if (tmp_max_port < 1 || tmp_max_port > 65535) { fprintf(stderr, "Invalid port(s):%s\n", optarg); return 1; } } else { tmp_max_port = tmp_min_port; } if (tmp_min_port > tmp_max_port) { fprintf(stderr, "Invalid port(s):%s\n", optarg); return 1; } if (tmp_max_port - tmp_min_port + 1 > MAX_IPTNL_ENTRIES) { fprintf(stderr, "Port range (%s) is larger than %u\n", port_str, MAX_IPTNL_ENTRIES); return 1; } *min_port = tmp_min_port; *max_port = tmp_max_port; return 0; } int main(int argc, char **argv) { int min_port = 0, max_port = 0, vip2tnl_map_fd; const char *optstr = "i:a:p:s:d:m:T:P:FSNh"; unsigned char opt_flags[256] = {}; struct bpf_prog_info 
info = {}; __u32 info_len = sizeof(info); unsigned int kill_after_s = 0; struct iptnl_info tnl = {}; struct bpf_program *prog; struct bpf_object *obj; struct vip vip = {}; char filename[256]; int opt, prog_fd; int i, err; tnl.family = AF_UNSPEC; vip.protocol = IPPROTO_TCP; for (i = 0; i < strlen(optstr); i++) if (optstr[i] != 'h' && 'a' <= optstr[i] && optstr[i] <= 'z') opt_flags[(unsigned char)optstr[i]] = 1; while ((opt = getopt(argc, argv, optstr)) != -1) { unsigned short family; unsigned int *v6; switch (opt) { case 'i': ifindex = if_nametoindex(optarg); if (!ifindex) ifindex = atoi(optarg); break; case 'a': vip.family = parse_ipstr(optarg, vip.daddr.v6); if (vip.family == AF_UNSPEC) return 1; break; case 'p': if (parse_ports(optarg, &min_port, &max_port)) return 1; break; case 'P': vip.protocol = atoi(optarg); break; case 's': case 'd': if (opt == 's') v6 = tnl.saddr.v6; else v6 = tnl.daddr.v6; family = parse_ipstr(optarg, v6); if (family == AF_UNSPEC) return 1; if (tnl.family == AF_UNSPEC) { tnl.family = family; } else if (tnl.family != family) { fprintf(stderr, "The IP version of the src and dst addresses used in the IP encapsulation does not match\n"); return 1; } break; case 'm': if (!ether_aton_r(optarg, (struct ether_addr *)tnl.dmac)) { fprintf(stderr, "Invalid mac address:%s\n", optarg); return 1; } break; case 'T': kill_after_s = atoi(optarg); break; case 'S': xdp_flags |= XDP_FLAGS_SKB_MODE; break; case 'N': /* default, set below */ break; case 'F': xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST; break; default: usage(argv[0]); return 1; } opt_flags[opt] = 0; } if (!(xdp_flags & XDP_FLAGS_SKB_MODE)) xdp_flags |= XDP_FLAGS_DRV_MODE; for (i = 0; i < strlen(optstr); i++) { if (opt_flags[(unsigned int)optstr[i]]) { fprintf(stderr, "Missing argument -%c\n", optstr[i]); usage(argv[0]); return 1; } } if (!ifindex) { fprintf(stderr, "Invalid ifname\n"); return 1; } snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); obj = bpf_object__open_file(filename, 
NULL); if (libbpf_get_error(obj)) return 1; prog = bpf_object__next_program(obj, NULL); bpf_program__set_type(prog, BPF_PROG_TYPE_XDP); err = bpf_object__load(obj); if (err) { printf("bpf_object__load(): %s\n", strerror(errno)); return 1; } prog_fd = bpf_program__fd(prog); rxcnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rxcnt"); vip2tnl_map_fd = bpf_object__find_map_fd_by_name(obj, "vip2tnl"); if (vip2tnl_map_fd < 0 || rxcnt_map_fd < 0) { printf("bpf_object__find_map_fd_by_name failed\n"); return 1; } signal(SIGINT, int_exit); signal(SIGTERM, int_exit); while (min_port <= max_port) { vip.dport = htons(min_port++); if (bpf_map_update_elem(vip2tnl_map_fd, &vip, &tnl, BPF_NOEXIST)) { perror("bpf_map_update_elem(&vip2tnl)"); return 1; } } if (bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL) < 0) { printf("link set xdp fd failed\n"); return 1; } err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); if (err) { printf("can't get prog info - %s\n", strerror(errno)); return err; } prog_id = info.id; poll_stats(kill_after_s); bpf_xdp_detach(ifindex, xdp_flags, NULL); return 0; }
linux-master
samples/bpf/xdp_tx_iptunnel_user.c
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * BPF program to set base_rtt to 80us when host is running TCP-NV and
 * both hosts are in the same datacenter (as determined by IPv6 prefix).
 *
 * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program.
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/tcp.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include <linux/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#define DEBUG 1

SEC("sockops")
int bpf_basertt(struct bpf_sock_ops *skops)
{
	char cong[20];
	char nv[] = "nv";
	int rv = 0, n;
	int op;

	op = (int) skops->op;

#ifdef DEBUG
	bpf_printk("BPF command: %d\n", op);
#endif

	/* Check if both hosts are in the same datacenter. For this
	 * example they are if the 1st 5.5 bytes in the IPv6 address
	 * are the same.
	 */
	if (skops->family == AF_INET6 &&
	    skops->local_ip6[0] == skops->remote_ip6[0] &&
	    (bpf_ntohl(skops->local_ip6[1]) & 0xfff00000) ==
	    (bpf_ntohl(skops->remote_ip6[1]) & 0xfff00000)) {
		switch (op) {
		case BPF_SOCK_OPS_BASE_RTT:
			/* Only override base RTT when TCP-NV is this
			 * socket's congestion control algorithm; note
			 * sizeof(nv) == 3 so the NUL is compared too.
			 */
			n = bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION,
					   cong, sizeof(cong));
			if (!n && !__builtin_memcmp(cong, nv, sizeof(nv))) {
				/* Set base_rtt to 80us */
				rv = 80;
			} else if (n) {
				/* getsockopt failed: propagate the error */
				rv = n;
			} else {
				rv = -1;
			}
			break;
		default:
			rv = -1;
		}
	} else {
		rv = -1;
	}

#ifdef DEBUG
	bpf_printk("Returning %d\n", rv);
#endif

	skops->reply = rv;
	return 1;
}
char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/tcp_basertt_kern.c
/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Tracks outstanding kmem_cache allocations: an entry is added on
 * every kmem_cache_alloc_node() return and removed again on
 * kmem_cache_free(), so whatever remains in the map is still live.
 */
#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Per-allocation record, keyed in my_map by the object pointer. */
struct pair {
	u64 val;	/* bpf_ktime_get_ns() at allocation time */
	u64 ip;		/* return address of the allocating caller */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, long);
	__type(value, struct pair);
	__uint(max_entries, 1000000);
} my_map SEC(".maps");

/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
 * example will no longer be meaningful
 */
SEC("kprobe/kmem_cache_free")
int bpf_prog1(struct pt_regs *ctx)
{
	long ptr = PT_REGS_PARM2(ctx);

	/* Object freed: drop its allocation record. */
	bpf_map_delete_elem(&my_map, &ptr);
	return 0;
}

SEC("kretprobe/kmem_cache_alloc_node")
int bpf_prog2(struct pt_regs *ctx)
{
	long ptr = PT_REGS_RC(ctx);
	long ip = 0;

	/* get ip address of kmem_cache_alloc_node() caller */
	BPF_KRETPROBE_READ_RET_IP(ip, ctx);

	struct pair v = {
		.val = bpf_ktime_get_ns(),
		.ip = ip,
	};

	bpf_map_update_elem(&my_map, &ptr, &v, BPF_ANY);
	return 0;
}
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/tracex4.bpf.c
// SPDX-License-Identifier: GPL-2.0-only #define _GNU_SOURCE #include <arpa/inet.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #include <errno.h> #include <fcntl.h> #include <getopt.h> #include <linux/ethtool.h> #include <linux/hashtable.h> #include <linux/if_link.h> #include <linux/jhash.h> #include <linux/limits.h> #include <linux/list.h> #include <linux/sockios.h> #include <locale.h> #include <math.h> #include <net/if.h> #include <poll.h> #include <signal.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/ioctl.h> #include <sys/mman.h> #include <sys/signalfd.h> #include <sys/sysinfo.h> #include <sys/timerfd.h> #include <sys/utsname.h> #include <time.h> #include <unistd.h> #include "bpf_util.h" #include "xdp_sample_user.h" #define __sample_print(fmt, cond, ...) \ ({ \ if (cond) \ printf(fmt, ##__VA_ARGS__); \ }) #define print_always(fmt, ...) __sample_print(fmt, 1, ##__VA_ARGS__) #define print_default(fmt, ...) \ __sample_print(fmt, sample_log_level & LL_DEFAULT, ##__VA_ARGS__) #define __print_err(err, fmt, ...) \ ({ \ __sample_print(fmt, err > 0 || sample_log_level & LL_DEFAULT, \ ##__VA_ARGS__); \ sample_err_exp = sample_err_exp ? true : err > 0; \ }) #define print_err(err, fmt, ...) 
__print_err(err, fmt, ##__VA_ARGS__) #define __COLUMN(x) "%'10" x " %-13s" #define FMT_COLUMNf __COLUMN(".0f") #define FMT_COLUMNd __COLUMN("d") #define FMT_COLUMNl __COLUMN("llu") #define RX(rx) rx, "rx/s" #define PPS(pps) pps, "pkt/s" #define DROP(drop) drop, "drop/s" #define ERR(err) err, "error/s" #define HITS(hits) hits, "hit/s" #define XMIT(xmit) xmit, "xmit/s" #define PASS(pass) pass, "pass/s" #define REDIR(redir) redir, "redir/s" #define NANOSEC_PER_SEC 1000000000 /* 10^9 */ #define XDP_UNKNOWN (XDP_REDIRECT + 1) #define XDP_ACTION_MAX (XDP_UNKNOWN + 1) #define XDP_REDIRECT_ERR_MAX 7 enum map_type { MAP_RX, MAP_REDIRECT_ERR, MAP_CPUMAP_ENQUEUE, MAP_CPUMAP_KTHREAD, MAP_EXCEPTION, MAP_DEVMAP_XMIT, MAP_DEVMAP_XMIT_MULTI, NUM_MAP, }; enum log_level { LL_DEFAULT = 1U << 0, LL_SIMPLE = 1U << 1, LL_DEBUG = 1U << 2, }; struct record { __u64 timestamp; struct datarec total; struct datarec *cpu; }; struct map_entry { struct hlist_node node; __u64 pair; struct record val; }; struct stats_record { struct record rx_cnt; struct record redir_err[XDP_REDIRECT_ERR_MAX]; struct record kthread; struct record exception[XDP_ACTION_MAX]; struct record devmap_xmit; DECLARE_HASHTABLE(xmit_map, 5); struct record enq[]; }; struct sample_output { struct { __u64 rx; __u64 redir; __u64 drop; __u64 drop_xmit; __u64 err; __u64 xmit; } totals; struct { union { __u64 pps; __u64 num; }; __u64 drop; __u64 err; } rx_cnt; struct { __u64 suc; __u64 err; } redir_cnt; struct { __u64 hits; } except_cnt; struct { __u64 pps; __u64 drop; __u64 err; double bavg; } xmit_cnt; }; struct xdp_desc { int ifindex; __u32 prog_id; int flags; } sample_xdp_progs[32]; struct datarec *sample_mmap[NUM_MAP]; struct bpf_map *sample_map[NUM_MAP]; size_t sample_map_count[NUM_MAP]; enum log_level sample_log_level; struct sample_output sample_out; unsigned long sample_interval; bool sample_err_exp; int sample_xdp_cnt; int sample_n_cpus; int sample_sig_fd; int sample_mask; static const char 
*xdp_redirect_err_names[XDP_REDIRECT_ERR_MAX] = { /* Key=1 keeps unknown errors */ "Success", "Unknown", "EINVAL", "ENETDOWN", "EMSGSIZE", "EOPNOTSUPP", "ENOSPC", }; /* Keyed from Unknown */ static const char *xdp_redirect_err_help[XDP_REDIRECT_ERR_MAX - 1] = { "Unknown error", "Invalid redirection", "Device being redirected to is down", "Packet length too large for device", "Operation not supported", "No space in ptr_ring of cpumap kthread", }; static const char *xdp_action_names[XDP_ACTION_MAX] = { [XDP_ABORTED] = "XDP_ABORTED", [XDP_DROP] = "XDP_DROP", [XDP_PASS] = "XDP_PASS", [XDP_TX] = "XDP_TX", [XDP_REDIRECT] = "XDP_REDIRECT", [XDP_UNKNOWN] = "XDP_UNKNOWN", }; static __u64 gettime(void) { struct timespec t; int res; res = clock_gettime(CLOCK_MONOTONIC, &t); if (res < 0) { fprintf(stderr, "Error with gettimeofday! (%i)\n", res); return UINT64_MAX; } return (__u64)t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec; } static const char *action2str(int action) { if (action < XDP_ACTION_MAX) return xdp_action_names[action]; return NULL; } static void sample_print_help(int mask) { printf("Output format description\n\n" "By default, redirect success statistics are disabled, use -s to enable.\n" "The terse output mode is default, verbose mode can be activated using -v\n" "Use SIGQUIT (Ctrl + \\) to switch the mode dynamically at runtime\n\n" "Terse mode displays at most the following fields:\n" " rx/s Number of packets received per second\n" " redir/s Number of packets successfully redirected per second\n" " err,drop/s Aggregated count of errors per second (including dropped packets)\n" " xmit/s Number of packets transmitted on the output device per second\n\n" "Output description for verbose mode:\n" " FIELD DESCRIPTION\n"); if (mask & SAMPLE_RX_CNT) { printf(" receive\t\tDisplays the number of packets received & errors encountered\n" " \t\t\tWhenever an error or packet drop occurs, details of per CPU error\n" " \t\t\tand drop statistics will be expanded inline in terse 
mode.\n" " \t\t\t\tpkt/s - Packets received per second\n" " \t\t\t\tdrop/s - Packets dropped per second\n" " \t\t\t\terror/s - Errors encountered per second\n\n"); } if (mask & (SAMPLE_REDIRECT_CNT | SAMPLE_REDIRECT_ERR_CNT)) { printf(" redirect\t\tDisplays the number of packets successfully redirected\n" " \t\t\tErrors encountered are expanded under redirect_err field\n" " \t\t\tNote that passing -s to enable it has a per packet overhead\n" " \t\t\t\tredir/s - Packets redirected successfully per second\n\n" " redirect_err\t\tDisplays the number of packets that failed redirection\n" " \t\t\tThe errno is expanded under this field with per CPU count\n" " \t\t\tThe recognized errors are:\n"); for (int i = 2; i < XDP_REDIRECT_ERR_MAX; i++) printf("\t\t\t %s: %s\n", xdp_redirect_err_names[i], xdp_redirect_err_help[i - 1]); printf(" \n\t\t\t\terror/s - Packets that failed redirection per second\n\n"); } if (mask & SAMPLE_CPUMAP_ENQUEUE_CNT) { printf(" enqueue to cpu N\tDisplays the number of packets enqueued to bulk queue of CPU N\n" " \t\t\tExpands to cpu:FROM->N to display enqueue stats for each CPU enqueuing to CPU N\n" " \t\t\tReceived packets can be associated with the CPU redirect program is enqueuing \n" " \t\t\tpackets to.\n" " \t\t\t\tpkt/s - Packets enqueued per second from other CPU to CPU N\n" " \t\t\t\tdrop/s - Packets dropped when trying to enqueue to CPU N\n" " \t\t\t\tbulk-avg - Average number of packets processed for each event\n\n"); } if (mask & SAMPLE_CPUMAP_KTHREAD_CNT) { printf(" kthread\t\tDisplays the number of packets processed in CPUMAP kthread for each CPU\n" " \t\t\tPackets consumed from ptr_ring in kthread, and its xdp_stats (after calling \n" " \t\t\tCPUMAP bpf prog) are expanded below this. 
xdp_stats are expanded as a total and\n" " \t\t\tthen per-CPU to associate it to each CPU's pinned CPUMAP kthread.\n" " \t\t\t\tpkt/s - Packets consumed per second from ptr_ring\n" " \t\t\t\tdrop/s - Packets dropped per second in kthread\n" " \t\t\t\tsched - Number of times kthread called schedule()\n\n" " \t\t\txdp_stats (also expands to per-CPU counts)\n" " \t\t\t\tpass/s - XDP_PASS count for CPUMAP program execution\n" " \t\t\t\tdrop/s - XDP_DROP count for CPUMAP program execution\n" " \t\t\t\tredir/s - XDP_REDIRECT count for CPUMAP program execution\n\n"); } if (mask & SAMPLE_EXCEPTION_CNT) { printf(" xdp_exception\t\tDisplays xdp_exception tracepoint events\n" " \t\t\tThis can occur due to internal driver errors, unrecognized\n" " \t\t\tXDP actions and due to explicit user trigger by use of XDP_ABORTED\n" " \t\t\tEach action is expanded below this field with its count\n" " \t\t\t\thit/s - Number of times the tracepoint was hit per second\n\n"); } if (mask & SAMPLE_DEVMAP_XMIT_CNT) { printf(" devmap_xmit\t\tDisplays devmap_xmit tracepoint events\n" " \t\t\tThis tracepoint is invoked for successful transmissions on output\n" " \t\t\tdevice but these statistics are not available for generic XDP mode,\n" " \t\t\thence they will be omitted from the output when using SKB mode\n" " \t\t\t\txmit/s - Number of packets that were transmitted per second\n" " \t\t\t\tdrop/s - Number of packets that failed transmissions per second\n" " \t\t\t\tdrv_err/s - Number of internal driver errors per second\n" " \t\t\t\tbulk-avg - Average number of packets processed for each event\n\n"); } } void sample_usage(char *argv[], const struct option *long_options, const char *doc, int mask, bool error) { int i; if (!error) sample_print_help(mask); printf("\n%s\nOption for %s:\n", doc, argv[0]); for (i = 0; long_options[i].name != 0; i++) { printf(" --%-15s", long_options[i].name); if (long_options[i].flag != NULL) printf(" flag (internal value: %d)", *long_options[i].flag); else printf("\t 
short-option: -%c", long_options[i].val); printf("\n"); } printf("\n"); } static struct datarec *alloc_record_per_cpu(void) { unsigned int nr_cpus = libbpf_num_possible_cpus(); struct datarec *array; array = calloc(nr_cpus, sizeof(*array)); if (!array) { fprintf(stderr, "Failed to allocate memory (nr_cpus: %u)\n", nr_cpus); return NULL; } return array; } static int map_entry_init(struct map_entry *e, __u64 pair) { e->pair = pair; INIT_HLIST_NODE(&e->node); e->val.timestamp = gettime(); e->val.cpu = alloc_record_per_cpu(); if (!e->val.cpu) return -ENOMEM; return 0; } static void map_collect_percpu(struct datarec *values, struct record *rec) { /* For percpu maps, userspace gets a value per possible CPU */ unsigned int nr_cpus = libbpf_num_possible_cpus(); __u64 sum_xdp_redirect = 0; __u64 sum_processed = 0; __u64 sum_xdp_pass = 0; __u64 sum_xdp_drop = 0; __u64 sum_dropped = 0; __u64 sum_issue = 0; int i; /* Get time as close as possible to reading map contents */ rec->timestamp = gettime(); /* Record and sum values from each CPU */ for (i = 0; i < nr_cpus; i++) { rec->cpu[i].processed = READ_ONCE(values[i].processed); rec->cpu[i].dropped = READ_ONCE(values[i].dropped); rec->cpu[i].issue = READ_ONCE(values[i].issue); rec->cpu[i].xdp_pass = READ_ONCE(values[i].xdp_pass); rec->cpu[i].xdp_drop = READ_ONCE(values[i].xdp_drop); rec->cpu[i].xdp_redirect = READ_ONCE(values[i].xdp_redirect); sum_processed += rec->cpu[i].processed; sum_dropped += rec->cpu[i].dropped; sum_issue += rec->cpu[i].issue; sum_xdp_pass += rec->cpu[i].xdp_pass; sum_xdp_drop += rec->cpu[i].xdp_drop; sum_xdp_redirect += rec->cpu[i].xdp_redirect; } rec->total.processed = sum_processed; rec->total.dropped = sum_dropped; rec->total.issue = sum_issue; rec->total.xdp_pass = sum_xdp_pass; rec->total.xdp_drop = sum_xdp_drop; rec->total.xdp_redirect = sum_xdp_redirect; } static int map_collect_percpu_devmap(int map_fd, struct stats_record *rec) { unsigned int nr_cpus = bpf_num_possible_cpus(); __u32 batch, count 
= 32; struct datarec *values; bool init = false; __u64 *keys; int i, ret; keys = calloc(count, sizeof(__u64)); if (!keys) return -ENOMEM; values = calloc(count * nr_cpus, sizeof(struct datarec)); if (!values) { free(keys); return -ENOMEM; } for (;;) { bool exit = false; ret = bpf_map_lookup_batch(map_fd, init ? &batch : NULL, &batch, keys, values, &count, NULL); if (ret < 0 && errno != ENOENT) break; if (errno == ENOENT) exit = true; init = true; for (i = 0; i < count; i++) { struct map_entry *e, *x = NULL; __u64 pair = keys[i]; struct datarec *arr; arr = &values[i * nr_cpus]; hash_for_each_possible(rec->xmit_map, e, node, pair) { if (e->pair == pair) { x = e; break; } } if (!x) { x = calloc(1, sizeof(*x)); if (!x) goto cleanup; if (map_entry_init(x, pair) < 0) { free(x); goto cleanup; } hash_add(rec->xmit_map, &x->node, pair); } map_collect_percpu(arr, &x->val); } if (exit) break; count = 32; } free(values); free(keys); return 0; cleanup: free(values); free(keys); return -ENOMEM; } static struct stats_record *alloc_stats_record(void) { struct stats_record *rec; int i; rec = calloc(1, sizeof(*rec) + sample_n_cpus * sizeof(struct record)); if (!rec) { fprintf(stderr, "Failed to allocate memory\n"); return NULL; } if (sample_mask & SAMPLE_RX_CNT) { rec->rx_cnt.cpu = alloc_record_per_cpu(); if (!rec->rx_cnt.cpu) { fprintf(stderr, "Failed to allocate rx_cnt per-CPU array\n"); goto end_rec; } } if (sample_mask & (SAMPLE_REDIRECT_CNT | SAMPLE_REDIRECT_ERR_CNT)) { for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++) { rec->redir_err[i].cpu = alloc_record_per_cpu(); if (!rec->redir_err[i].cpu) { fprintf(stderr, "Failed to allocate redir_err per-CPU array for " "\"%s\" case\n", xdp_redirect_err_names[i]); while (i--) free(rec->redir_err[i].cpu); goto end_rx_cnt; } } } if (sample_mask & SAMPLE_CPUMAP_KTHREAD_CNT) { rec->kthread.cpu = alloc_record_per_cpu(); if (!rec->kthread.cpu) { fprintf(stderr, "Failed to allocate kthread per-CPU array\n"); goto end_redir; } } if (sample_mask & 
SAMPLE_EXCEPTION_CNT) { for (i = 0; i < XDP_ACTION_MAX; i++) { rec->exception[i].cpu = alloc_record_per_cpu(); if (!rec->exception[i].cpu) { fprintf(stderr, "Failed to allocate exception per-CPU array for " "\"%s\" case\n", action2str(i)); while (i--) free(rec->exception[i].cpu); goto end_kthread; } } } if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT) { rec->devmap_xmit.cpu = alloc_record_per_cpu(); if (!rec->devmap_xmit.cpu) { fprintf(stderr, "Failed to allocate devmap_xmit per-CPU array\n"); goto end_exception; } } if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) hash_init(rec->xmit_map); if (sample_mask & SAMPLE_CPUMAP_ENQUEUE_CNT) { for (i = 0; i < sample_n_cpus; i++) { rec->enq[i].cpu = alloc_record_per_cpu(); if (!rec->enq[i].cpu) { fprintf(stderr, "Failed to allocate enqueue per-CPU array for " "CPU %d\n", i); while (i--) free(rec->enq[i].cpu); goto end_devmap_xmit; } } } return rec; end_devmap_xmit: free(rec->devmap_xmit.cpu); end_exception: for (i = 0; i < XDP_ACTION_MAX; i++) free(rec->exception[i].cpu); end_kthread: free(rec->kthread.cpu); end_redir: for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++) free(rec->redir_err[i].cpu); end_rx_cnt: free(rec->rx_cnt.cpu); end_rec: free(rec); return NULL; } static void free_stats_record(struct stats_record *r) { struct hlist_node *tmp; struct map_entry *e; int i; for (i = 0; i < sample_n_cpus; i++) free(r->enq[i].cpu); hash_for_each_safe(r->xmit_map, i, tmp, e, node) { hash_del(&e->node); free(e->val.cpu); free(e); } free(r->devmap_xmit.cpu); for (i = 0; i < XDP_ACTION_MAX; i++) free(r->exception[i].cpu); free(r->kthread.cpu); for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++) free(r->redir_err[i].cpu); free(r->rx_cnt.cpu); free(r); } static double calc_period(struct record *r, struct record *p) { double period_ = 0; __u64 period = 0; period = r->timestamp - p->timestamp; if (period > 0) period_ = ((double)period / NANOSEC_PER_SEC); return period_; } static double sample_round(double val) { if (val - floor(val) < 0.5) return floor(val); 
return ceil(val); } static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_) { __u64 packets = 0; __u64 pps = 0; if (period_ > 0) { packets = r->processed - p->processed; pps = sample_round(packets / period_); } return pps; } static __u64 calc_drop_pps(struct datarec *r, struct datarec *p, double period_) { __u64 packets = 0; __u64 pps = 0; if (period_ > 0) { packets = r->dropped - p->dropped; pps = sample_round(packets / period_); } return pps; } static __u64 calc_errs_pps(struct datarec *r, struct datarec *p, double period_) { __u64 packets = 0; __u64 pps = 0; if (period_ > 0) { packets = r->issue - p->issue; pps = sample_round(packets / period_); } return pps; } static __u64 calc_info_pps(struct datarec *r, struct datarec *p, double period_) { __u64 packets = 0; __u64 pps = 0; if (period_ > 0) { packets = r->info - p->info; pps = sample_round(packets / period_); } return pps; } static void calc_xdp_pps(struct datarec *r, struct datarec *p, double *xdp_pass, double *xdp_drop, double *xdp_redirect, double period_) { *xdp_pass = 0, *xdp_drop = 0, *xdp_redirect = 0; if (period_ > 0) { *xdp_redirect = (r->xdp_redirect - p->xdp_redirect) / period_; *xdp_pass = (r->xdp_pass - p->xdp_pass) / period_; *xdp_drop = (r->xdp_drop - p->xdp_drop) / period_; } } static void stats_get_rx_cnt(struct stats_record *stats_rec, struct stats_record *stats_prev, unsigned int nr_cpus, struct sample_output *out) { struct record *rec, *prev; double t, pps, drop, err; int i; rec = &stats_rec->rx_cnt; prev = &stats_prev->rx_cnt; t = calc_period(rec, prev); for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; pps = calc_pps(r, p, t); drop = calc_drop_pps(r, p, t); err = calc_errs_pps(r, p, t); if (!pps && !drop && !err) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n", str, PPS(pps), DROP(drop), ERR(err)); } if (out) { pps = calc_pps(&rec->total, 
&prev->total, t); drop = calc_drop_pps(&rec->total, &prev->total, t); err = calc_errs_pps(&rec->total, &prev->total, t); out->rx_cnt.pps = pps; out->rx_cnt.drop = drop; out->rx_cnt.err = err; out->totals.rx += pps; out->totals.drop += drop; out->totals.err += err; } } static void stats_get_cpumap_enqueue(struct stats_record *stats_rec, struct stats_record *stats_prev, unsigned int nr_cpus) { struct record *rec, *prev; double t, pps, drop, err; int i, to_cpu; /* cpumap enqueue stats */ for (to_cpu = 0; to_cpu < sample_n_cpus; to_cpu++) { rec = &stats_rec->enq[to_cpu]; prev = &stats_prev->enq[to_cpu]; t = calc_period(rec, prev); pps = calc_pps(&rec->total, &prev->total, t); drop = calc_drop_pps(&rec->total, &prev->total, t); err = calc_errs_pps(&rec->total, &prev->total, t); if (pps > 0 || drop > 0) { char str[64]; snprintf(str, sizeof(str), "enqueue to cpu %d", to_cpu); if (err > 0) err = pps / err; /* calc average bulk size */ print_err(drop, " %-20s " FMT_COLUMNf FMT_COLUMNf __COLUMN( ".2f") "\n", str, PPS(pps), DROP(drop), err, "bulk-avg"); } for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; pps = calc_pps(r, p, t); drop = calc_drop_pps(r, p, t); err = calc_errs_pps(r, p, t); if (!pps && !drop && !err) continue; snprintf(str, sizeof(str), "cpu:%d->%d", i, to_cpu); if (err > 0) err = pps / err; /* calc average bulk size */ print_default( " %-18s " FMT_COLUMNf FMT_COLUMNf __COLUMN( ".2f") "\n", str, PPS(pps), DROP(drop), err, "bulk-avg"); } } } static void stats_get_cpumap_remote(struct stats_record *stats_rec, struct stats_record *stats_prev, unsigned int nr_cpus) { double xdp_pass, xdp_drop, xdp_redirect; struct record *rec, *prev; double t; int i; rec = &stats_rec->kthread; prev = &stats_prev->kthread; t = calc_period(rec, prev); calc_xdp_pps(&rec->total, &prev->total, &xdp_pass, &xdp_drop, &xdp_redirect, t); if (xdp_pass || xdp_drop || xdp_redirect) { print_err(xdp_drop, " %-18s " FMT_COLUMNf 
FMT_COLUMNf FMT_COLUMNf "\n", "xdp_stats", PASS(xdp_pass), DROP(xdp_drop), REDIR(xdp_redirect)); } for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; calc_xdp_pps(r, p, &xdp_pass, &xdp_drop, &xdp_redirect, t); if (!xdp_pass && !xdp_drop && !xdp_redirect) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-16s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n", str, PASS(xdp_pass), DROP(xdp_drop), REDIR(xdp_redirect)); } } static void stats_get_cpumap_kthread(struct stats_record *stats_rec, struct stats_record *stats_prev, unsigned int nr_cpus) { struct record *rec, *prev; double t, pps, drop, err; int i; rec = &stats_rec->kthread; prev = &stats_prev->kthread; t = calc_period(rec, prev); pps = calc_pps(&rec->total, &prev->total, t); drop = calc_drop_pps(&rec->total, &prev->total, t); err = calc_errs_pps(&rec->total, &prev->total, t); print_err(drop, " %-20s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n", pps ? "kthread total" : "kthread", PPS(pps), DROP(drop), err, "sched"); for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; pps = calc_pps(r, p, t); drop = calc_drop_pps(r, p, t); err = calc_errs_pps(r, p, t); if (!pps && !drop && !err) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n", str, PPS(pps), DROP(drop), err, "sched"); } } static void stats_get_redirect_cnt(struct stats_record *stats_rec, struct stats_record *stats_prev, unsigned int nr_cpus, struct sample_output *out) { struct record *rec, *prev; double t, pps; int i; rec = &stats_rec->redir_err[0]; prev = &stats_prev->redir_err[0]; t = calc_period(rec, prev); for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; pps = calc_pps(r, p, t); if (!pps) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-18s " FMT_COLUMNf "\n", str, 
REDIR(pps)); } if (out) { pps = calc_pps(&rec->total, &prev->total, t); out->redir_cnt.suc = pps; out->totals.redir += pps; } } static void stats_get_redirect_err_cnt(struct stats_record *stats_rec, struct stats_record *stats_prev, unsigned int nr_cpus, struct sample_output *out) { struct record *rec, *prev; double t, drop, sum = 0; int rec_i, i; for (rec_i = 1; rec_i < XDP_REDIRECT_ERR_MAX; rec_i++) { char str[64]; rec = &stats_rec->redir_err[rec_i]; prev = &stats_prev->redir_err[rec_i]; t = calc_period(rec, prev); drop = calc_drop_pps(&rec->total, &prev->total, t); if (drop > 0 && !out) { snprintf(str, sizeof(str), sample_log_level & LL_DEFAULT ? "%s total" : "%s", xdp_redirect_err_names[rec_i]); print_err(drop, " %-18s " FMT_COLUMNf "\n", str, ERR(drop)); } for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; double drop; drop = calc_drop_pps(r, p, t); if (!drop) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-16s" FMT_COLUMNf "\n", str, ERR(drop)); } sum += drop; } if (out) { out->redir_cnt.err = sum; out->totals.err += sum; } } static void stats_get_exception_cnt(struct stats_record *stats_rec, struct stats_record *stats_prev, unsigned int nr_cpus, struct sample_output *out) { double t, drop, sum = 0; struct record *rec, *prev; int rec_i, i; for (rec_i = 0; rec_i < XDP_ACTION_MAX; rec_i++) { rec = &stats_rec->exception[rec_i]; prev = &stats_prev->exception[rec_i]; t = calc_period(rec, prev); drop = calc_drop_pps(&rec->total, &prev->total, t); /* Fold out errors after heading */ sum += drop; if (drop > 0 && !out) { print_always(" %-18s " FMT_COLUMNf "\n", action2str(rec_i), ERR(drop)); for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; double drop; drop = calc_drop_pps(r, p, t); if (!drop) continue; snprintf(str, sizeof(str), "cpu:%d", i); print_default(" %-16s" FMT_COLUMNf "\n", str, ERR(drop)); } } } if (out) { 
out->except_cnt.hits = sum; out->totals.err += sum; } } static void stats_get_devmap_xmit(struct stats_record *stats_rec, struct stats_record *stats_prev, unsigned int nr_cpus, struct sample_output *out) { double pps, drop, info, err; struct record *rec, *prev; double t; int i; rec = &stats_rec->devmap_xmit; prev = &stats_prev->devmap_xmit; t = calc_period(rec, prev); for (i = 0; i < nr_cpus; i++) { struct datarec *r = &rec->cpu[i]; struct datarec *p = &prev->cpu[i]; char str[64]; pps = calc_pps(r, p, t); drop = calc_drop_pps(r, p, t); err = calc_errs_pps(r, p, t); if (!pps && !drop && !err) continue; snprintf(str, sizeof(str), "cpu:%d", i); info = calc_info_pps(r, p, t); if (info > 0) info = (pps + drop) / info; /* calc avg bulk */ print_default(" %-18s" FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf __COLUMN(".2f") "\n", str, XMIT(pps), DROP(drop), err, "drv_err/s", info, "bulk-avg"); } if (out) { pps = calc_pps(&rec->total, &prev->total, t); drop = calc_drop_pps(&rec->total, &prev->total, t); info = calc_info_pps(&rec->total, &prev->total, t); if (info > 0) info = (pps + drop) / info; /* calc avg bulk */ err = calc_errs_pps(&rec->total, &prev->total, t); out->xmit_cnt.pps = pps; out->xmit_cnt.drop = drop; out->xmit_cnt.bavg = info; out->xmit_cnt.err = err; out->totals.xmit += pps; out->totals.drop_xmit += drop; out->totals.err += err; } } static void stats_get_devmap_xmit_multi(struct stats_record *stats_rec, struct stats_record *stats_prev, unsigned int nr_cpus, struct sample_output *out, bool xmit_total) { double pps, drop, info, err; struct map_entry *entry; struct record *r, *p; double t; int bkt; hash_for_each(stats_rec->xmit_map, bkt, entry, node) { struct map_entry *e, *x = NULL; char ifname_from[IFNAMSIZ]; char ifname_to[IFNAMSIZ]; const char *fstr, *tstr; unsigned long prev_time; struct record beg = {}; __u32 from_idx, to_idx; char str[128]; __u64 pair; int i; prev_time = sample_interval * NANOSEC_PER_SEC; pair = entry->pair; from_idx = pair >> 32; to_idx = pair & 
0xFFFFFFFF; r = &entry->val; beg.timestamp = r->timestamp - prev_time; /* Find matching entry from stats_prev map */ hash_for_each_possible(stats_prev->xmit_map, e, node, pair) { if (e->pair == pair) { x = e; break; } } if (x) p = &x->val; else p = &beg; t = calc_period(r, p); pps = calc_pps(&r->total, &p->total, t); drop = calc_drop_pps(&r->total, &p->total, t); info = calc_info_pps(&r->total, &p->total, t); if (info > 0) info = (pps + drop) / info; /* calc avg bulk */ err = calc_errs_pps(&r->total, &p->total, t); if (out) { /* We are responsible for filling out totals */ out->totals.xmit += pps; out->totals.drop_xmit += drop; out->totals.err += err; continue; } fstr = tstr = NULL; if (if_indextoname(from_idx, ifname_from)) fstr = ifname_from; if (if_indextoname(to_idx, ifname_to)) tstr = ifname_to; snprintf(str, sizeof(str), "xmit %s->%s", fstr ?: "?", tstr ?: "?"); /* Skip idle streams of redirection */ if (pps || drop || err) { print_err(drop, " %-20s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf __COLUMN(".2f") "\n", str, XMIT(pps), DROP(drop), err, "drv_err/s", info, "bulk-avg"); } for (i = 0; i < nr_cpus; i++) { struct datarec *rc = &r->cpu[i]; struct datarec *pc, p_beg = {}; char str[64]; pc = p == &beg ? 
&p_beg : &p->cpu[i]; pps = calc_pps(rc, pc, t); drop = calc_drop_pps(rc, pc, t); err = calc_errs_pps(rc, pc, t); if (!pps && !drop && !err) continue; snprintf(str, sizeof(str), "cpu:%d", i); info = calc_info_pps(rc, pc, t); if (info > 0) info = (pps + drop) / info; /* calc avg bulk */ print_default(" %-18s" FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf __COLUMN(".2f") "\n", str, XMIT(pps), DROP(drop), err, "drv_err/s", info, "bulk-avg"); } } } static void stats_print(const char *prefix, int mask, struct stats_record *r, struct stats_record *p, struct sample_output *out) { int nr_cpus = libbpf_num_possible_cpus(); const char *str; print_always("%-23s", prefix ?: "Summary"); if (mask & SAMPLE_RX_CNT) print_always(FMT_COLUMNl, RX(out->totals.rx)); if (mask & SAMPLE_REDIRECT_CNT) print_always(FMT_COLUMNl, REDIR(out->totals.redir)); printf(FMT_COLUMNl, out->totals.err + out->totals.drop + out->totals.drop_xmit, "err,drop/s"); if (mask & SAMPLE_DEVMAP_XMIT_CNT || mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) printf(FMT_COLUMNl, XMIT(out->totals.xmit)); printf("\n"); if (mask & SAMPLE_RX_CNT) { str = (sample_log_level & LL_DEFAULT) && out->rx_cnt.pps ? "receive total" : "receive"; print_err((out->rx_cnt.err || out->rx_cnt.drop), " %-20s " FMT_COLUMNl FMT_COLUMNl FMT_COLUMNl "\n", str, PPS(out->rx_cnt.pps), DROP(out->rx_cnt.drop), ERR(out->rx_cnt.err)); stats_get_rx_cnt(r, p, nr_cpus, NULL); } if (mask & SAMPLE_CPUMAP_ENQUEUE_CNT) stats_get_cpumap_enqueue(r, p, nr_cpus); if (mask & SAMPLE_CPUMAP_KTHREAD_CNT) { stats_get_cpumap_kthread(r, p, nr_cpus); stats_get_cpumap_remote(r, p, nr_cpus); } if (mask & SAMPLE_REDIRECT_CNT) { str = out->redir_cnt.suc ? "redirect total" : "redirect"; print_default(" %-20s " FMT_COLUMNl "\n", str, REDIR(out->redir_cnt.suc)); stats_get_redirect_cnt(r, p, nr_cpus, NULL); } if (mask & SAMPLE_REDIRECT_ERR_CNT) { str = (sample_log_level & LL_DEFAULT) && out->redir_cnt.err ? 
"redirect_err total" : "redirect_err"; print_err(out->redir_cnt.err, " %-20s " FMT_COLUMNl "\n", str, ERR(out->redir_cnt.err)); stats_get_redirect_err_cnt(r, p, nr_cpus, NULL); } if (mask & SAMPLE_EXCEPTION_CNT) { str = out->except_cnt.hits ? "xdp_exception total" : "xdp_exception"; print_err(out->except_cnt.hits, " %-20s " FMT_COLUMNl "\n", str, HITS(out->except_cnt.hits)); stats_get_exception_cnt(r, p, nr_cpus, NULL); } if (mask & SAMPLE_DEVMAP_XMIT_CNT) { str = (sample_log_level & LL_DEFAULT) && out->xmit_cnt.pps ? "devmap_xmit total" : "devmap_xmit"; print_err(out->xmit_cnt.err || out->xmit_cnt.drop, " %-20s " FMT_COLUMNl FMT_COLUMNl FMT_COLUMNl __COLUMN(".2f") "\n", str, XMIT(out->xmit_cnt.pps), DROP(out->xmit_cnt.drop), out->xmit_cnt.err, "drv_err/s", out->xmit_cnt.bavg, "bulk-avg"); stats_get_devmap_xmit(r, p, nr_cpus, NULL); } if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) stats_get_devmap_xmit_multi(r, p, nr_cpus, NULL, mask & SAMPLE_DEVMAP_XMIT_CNT); if (sample_log_level & LL_DEFAULT || ((sample_log_level & LL_SIMPLE) && sample_err_exp)) { sample_err_exp = false; printf("\n"); } } int sample_setup_maps(struct bpf_map **maps) { sample_n_cpus = libbpf_num_possible_cpus(); for (int i = 0; i < MAP_DEVMAP_XMIT_MULTI; i++) { sample_map[i] = maps[i]; switch (i) { case MAP_RX: case MAP_CPUMAP_KTHREAD: case MAP_DEVMAP_XMIT: sample_map_count[i] = sample_n_cpus; break; case MAP_REDIRECT_ERR: sample_map_count[i] = XDP_REDIRECT_ERR_MAX * sample_n_cpus; break; case MAP_EXCEPTION: sample_map_count[i] = XDP_ACTION_MAX * sample_n_cpus; case MAP_CPUMAP_ENQUEUE: sample_map_count[i] = sample_n_cpus * sample_n_cpus; break; default: return -EINVAL; } if (bpf_map__set_max_entries(sample_map[i], sample_map_count[i]) < 0) return -errno; } sample_map[MAP_DEVMAP_XMIT_MULTI] = maps[MAP_DEVMAP_XMIT_MULTI]; return 0; } static int sample_setup_maps_mappings(void) { for (int i = 0; i < MAP_DEVMAP_XMIT_MULTI; i++) { size_t size = sample_map_count[i] * sizeof(struct datarec); sample_mmap[i] = 
mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, bpf_map__fd(sample_map[i]), 0); if (sample_mmap[i] == MAP_FAILED) return -errno; } return 0; } int __sample_init(int mask) { sigset_t st; sigemptyset(&st); sigaddset(&st, SIGQUIT); sigaddset(&st, SIGINT); sigaddset(&st, SIGTERM); if (sigprocmask(SIG_BLOCK, &st, NULL) < 0) return -errno; sample_sig_fd = signalfd(-1, &st, SFD_CLOEXEC | SFD_NONBLOCK); if (sample_sig_fd < 0) return -errno; sample_mask = mask; return sample_setup_maps_mappings(); } static int __sample_remove_xdp(int ifindex, __u32 prog_id, int xdp_flags) { __u32 cur_prog_id = 0; int ret; if (prog_id) { ret = bpf_xdp_query_id(ifindex, xdp_flags, &cur_prog_id); if (ret < 0) return -errno; if (prog_id != cur_prog_id) { print_always( "Program on ifindex %d does not match installed " "program, skipping unload\n", ifindex); return -ENOENT; } } return bpf_xdp_detach(ifindex, xdp_flags, NULL); } int sample_install_xdp(struct bpf_program *xdp_prog, int ifindex, bool generic, bool force) { int ret, xdp_flags = 0; __u32 prog_id = 0; if (sample_xdp_cnt == 32) { fprintf(stderr, "Total limit for installed XDP programs in a sample reached\n"); return -ENOTSUP; } xdp_flags |= !force ? XDP_FLAGS_UPDATE_IF_NOEXIST : 0; xdp_flags |= generic ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE; ret = bpf_xdp_attach(ifindex, bpf_program__fd(xdp_prog), xdp_flags, NULL); if (ret < 0) { ret = -errno; fprintf(stderr, "Failed to install program \"%s\" on ifindex %d, mode = %s, " "force = %s: %s\n", bpf_program__name(xdp_prog), ifindex, generic ? "skb" : "native", force ? 
"true" : "false", strerror(-ret)); return ret; } ret = bpf_xdp_query_id(ifindex, xdp_flags, &prog_id); if (ret < 0) { ret = -errno; fprintf(stderr, "Failed to get XDP program id for ifindex %d, removing program: %s\n", ifindex, strerror(errno)); __sample_remove_xdp(ifindex, 0, xdp_flags); return ret; } sample_xdp_progs[sample_xdp_cnt++] = (struct xdp_desc){ ifindex, prog_id, xdp_flags }; return 0; } static void sample_summary_print(void) { double num = sample_out.rx_cnt.num; if (sample_out.totals.rx) { double pkts = sample_out.totals.rx; print_always(" Packets received : %'-10llu\n", sample_out.totals.rx); print_always(" Average packets/s : %'-10.0f\n", sample_round(pkts / num)); } if (sample_out.totals.redir) { double pkts = sample_out.totals.redir; print_always(" Packets redirected : %'-10llu\n", sample_out.totals.redir); print_always(" Average redir/s : %'-10.0f\n", sample_round(pkts / num)); } if (sample_out.totals.drop) print_always(" Rx dropped : %'-10llu\n", sample_out.totals.drop); if (sample_out.totals.drop_xmit) print_always(" Tx dropped : %'-10llu\n", sample_out.totals.drop_xmit); if (sample_out.totals.err) print_always(" Errors recorded : %'-10llu\n", sample_out.totals.err); if (sample_out.totals.xmit) { double pkts = sample_out.totals.xmit; print_always(" Packets transmitted : %'-10llu\n", sample_out.totals.xmit); print_always(" Average transmit/s : %'-10.0f\n", sample_round(pkts / num)); } } void sample_exit(int status) { size_t size; for (int i = 0; i < NUM_MAP; i++) { size = sample_map_count[i] * sizeof(**sample_mmap); munmap(sample_mmap[i], size); } while (sample_xdp_cnt--) { int i = sample_xdp_cnt, ifindex, xdp_flags; __u32 prog_id; prog_id = sample_xdp_progs[i].prog_id; ifindex = sample_xdp_progs[i].ifindex; xdp_flags = sample_xdp_progs[i].flags; __sample_remove_xdp(ifindex, prog_id, xdp_flags); } sample_summary_print(); close(sample_sig_fd); exit(status); } static int sample_stats_collect(struct stats_record *rec) { int i; if (sample_mask & 
SAMPLE_RX_CNT) map_collect_percpu(sample_mmap[MAP_RX], &rec->rx_cnt); if (sample_mask & SAMPLE_REDIRECT_CNT) map_collect_percpu(sample_mmap[MAP_REDIRECT_ERR], &rec->redir_err[0]); if (sample_mask & SAMPLE_REDIRECT_ERR_CNT) { for (i = 1; i < XDP_REDIRECT_ERR_MAX; i++) map_collect_percpu(&sample_mmap[MAP_REDIRECT_ERR][i * sample_n_cpus], &rec->redir_err[i]); } if (sample_mask & SAMPLE_CPUMAP_ENQUEUE_CNT) for (i = 0; i < sample_n_cpus; i++) map_collect_percpu(&sample_mmap[MAP_CPUMAP_ENQUEUE][i * sample_n_cpus], &rec->enq[i]); if (sample_mask & SAMPLE_CPUMAP_KTHREAD_CNT) map_collect_percpu(sample_mmap[MAP_CPUMAP_KTHREAD], &rec->kthread); if (sample_mask & SAMPLE_EXCEPTION_CNT) for (i = 0; i < XDP_ACTION_MAX; i++) map_collect_percpu(&sample_mmap[MAP_EXCEPTION][i * sample_n_cpus], &rec->exception[i]); if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT) map_collect_percpu(sample_mmap[MAP_DEVMAP_XMIT], &rec->devmap_xmit); if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) { if (map_collect_percpu_devmap(bpf_map__fd(sample_map[MAP_DEVMAP_XMIT_MULTI]), rec) < 0) return -EINVAL; } return 0; } static void sample_summary_update(struct sample_output *out) { sample_out.totals.rx += out->totals.rx; sample_out.totals.redir += out->totals.redir; sample_out.totals.drop += out->totals.drop; sample_out.totals.drop_xmit += out->totals.drop_xmit; sample_out.totals.err += out->totals.err; sample_out.totals.xmit += out->totals.xmit; sample_out.rx_cnt.num++; } static void sample_stats_print(int mask, struct stats_record *cur, struct stats_record *prev, char *prog_name) { struct sample_output out = {}; if (mask & SAMPLE_RX_CNT) stats_get_rx_cnt(cur, prev, 0, &out); if (mask & SAMPLE_REDIRECT_CNT) stats_get_redirect_cnt(cur, prev, 0, &out); if (mask & SAMPLE_REDIRECT_ERR_CNT) stats_get_redirect_err_cnt(cur, prev, 0, &out); if (mask & SAMPLE_EXCEPTION_CNT) stats_get_exception_cnt(cur, prev, 0, &out); if (mask & SAMPLE_DEVMAP_XMIT_CNT) stats_get_devmap_xmit(cur, prev, 0, &out); else if (mask & 
SAMPLE_DEVMAP_XMIT_CNT_MULTI) stats_get_devmap_xmit_multi(cur, prev, 0, &out, mask & SAMPLE_DEVMAP_XMIT_CNT); sample_summary_update(&out); stats_print(prog_name, mask, cur, prev, &out); } void sample_switch_mode(void) { sample_log_level ^= LL_DEBUG - 1; } static int sample_signal_cb(void) { struct signalfd_siginfo si; int r; r = read(sample_sig_fd, &si, sizeof(si)); if (r < 0) return -errno; switch (si.ssi_signo) { case SIGQUIT: sample_switch_mode(); printf("\n"); break; default: printf("\n"); return 1; } return 0; } /* Pointer swap trick */ static void swap(struct stats_record **a, struct stats_record **b) { struct stats_record *tmp; tmp = *a; *a = *b; *b = tmp; } static int sample_timer_cb(int timerfd, struct stats_record **rec, struct stats_record **prev) { char line[64] = "Summary"; int ret; __u64 t; ret = read(timerfd, &t, sizeof(t)); if (ret < 0) return -errno; swap(prev, rec); ret = sample_stats_collect(*rec); if (ret < 0) return ret; if (sample_xdp_cnt == 2 && !(sample_mask & SAMPLE_SKIP_HEADING)) { char fi[IFNAMSIZ]; char to[IFNAMSIZ]; const char *f, *t; f = t = NULL; if (if_indextoname(sample_xdp_progs[0].ifindex, fi)) f = fi; if (if_indextoname(sample_xdp_progs[1].ifindex, to)) t = to; snprintf(line, sizeof(line), "%s->%s", f ?: "?", t ?: "?"); } sample_stats_print(sample_mask, *rec, *prev, line); return 0; } int sample_run(int interval, void (*post_cb)(void *), void *ctx) { struct timespec ts = { interval, 0 }; struct itimerspec its = { ts, ts }; struct stats_record *rec, *prev; struct pollfd pfd[2] = {}; int timerfd, ret; if (!interval) { fprintf(stderr, "Incorrect interval 0\n"); return -EINVAL; } sample_interval = interval; /* Pretty print numbers */ setlocale(LC_NUMERIC, "en_US.UTF-8"); timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK); if (timerfd < 0) return -errno; timerfd_settime(timerfd, 0, &its, NULL); pfd[0].fd = sample_sig_fd; pfd[0].events = POLLIN; pfd[1].fd = timerfd; pfd[1].events = POLLIN; ret = -ENOMEM; rec = 
alloc_stats_record(); if (!rec) goto end; prev = alloc_stats_record(); if (!prev) goto end_rec; ret = sample_stats_collect(rec); if (ret < 0) goto end_rec_prev; for (;;) { ret = poll(pfd, 2, -1); if (ret < 0) { if (errno == EINTR) continue; else break; } if (pfd[0].revents & POLLIN) ret = sample_signal_cb(); else if (pfd[1].revents & POLLIN) ret = sample_timer_cb(timerfd, &rec, &prev); if (ret) break; if (post_cb) post_cb(ctx); } end_rec_prev: free_stats_record(prev); end_rec: free_stats_record(rec); end: close(timerfd); return ret; } const char *get_driver_name(int ifindex) { struct ethtool_drvinfo drv = {}; char ifname[IF_NAMESIZE]; static char drvname[32]; struct ifreq ifr = {}; int fd, r = 0; fd = socket(AF_INET, SOCK_DGRAM, 0); if (fd < 0) return "[error]"; if (!if_indextoname(ifindex, ifname)) goto end; drv.cmd = ETHTOOL_GDRVINFO; safe_strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); ifr.ifr_data = (void *)&drv; r = ioctl(fd, SIOCETHTOOL, &ifr); if (r) goto end; safe_strncpy(drvname, drv.driver, sizeof(drvname)); close(fd); return drvname; end: r = errno; close(fd); return r == EOPNOTSUPP ? "loopback" : "[error]"; } int get_mac_addr(int ifindex, void *mac_addr) { char ifname[IF_NAMESIZE]; struct ifreq ifr = {}; int fd, r; fd = socket(AF_INET, SOCK_DGRAM, 0); if (fd < 0) return -errno; if (!if_indextoname(ifindex, ifname)) { r = -errno; goto end; } safe_strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); r = ioctl(fd, SIOCGIFHWADDR, &ifr); if (r) { r = -errno; goto end; } memcpy(mac_addr, ifr.ifr_hwaddr.sa_data, 6 * sizeof(char)); end: close(fd); return r; } __attribute__((constructor)) static void sample_ctor(void) { if (libbpf_set_strict_mode(LIBBPF_STRICT_ALL) < 0) { fprintf(stderr, "Failed to set libbpf strict mode: %s\n", strerror(errno)); /* Just exit, nothing to cleanup right now */ exit(EXIT_FAIL_BPF); } }
linux-master
samples/bpf/xdp_sample_user.c
/* This test is a demo of using get_socket_uid and get_socket_cookie * helper function to do per socket based network traffic monitoring. * It requires iptables version higher then 1.6.1. to load pinned eBPF * program into the xt_bpf match. * * TEST: * ./run_cookie_uid_helper_example.sh -option * option: * -t: do traffic monitoring test, the program will continuously * print out network traffic happens after program started A sample * output is shown below: * * cookie: 877, uid: 0x3e8, Pakcet Count: 20, Bytes Count: 11058 * cookie: 132, uid: 0x0, Pakcet Count: 2, Bytes Count: 286 * cookie: 812, uid: 0x3e8, Pakcet Count: 3, Bytes Count: 1726 * cookie: 802, uid: 0x3e8, Pakcet Count: 2, Bytes Count: 104 * cookie: 877, uid: 0x3e8, Pakcet Count: 20, Bytes Count: 11058 * cookie: 831, uid: 0x3e8, Pakcet Count: 2, Bytes Count: 104 * cookie: 0, uid: 0x0, Pakcet Count: 6, Bytes Count: 712 * cookie: 880, uid: 0xfffe, Pakcet Count: 1, Bytes Count: 70 * * -s: do getsockopt SO_COOKIE test, the program will set up a pair of * UDP sockets and send packets between them. And read out the traffic data * directly from the ebpf map based on the socket cookie. * * Clean up: if using shell script, the script file will delete the iptables * rule and unmount the bpf program when exit. Else the iptables rule need * to be deleted by hand, see run_cookie_uid_helper_example.sh for detail. 
*/ #define _GNU_SOURCE #define offsetof(type, member) __builtin_offsetof(type, member) #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x))) #include <arpa/inet.h> #include <errno.h> #include <error.h> #include <limits.h> #include <linux/bpf.h> #include <linux/if_ether.h> #include <net/if.h> #include <signal.h> #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/socket.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <bpf/bpf.h> #include "bpf_insn.h" #define PORT 8888 struct stats { uint32_t uid; uint64_t packets; uint64_t bytes; }; static int map_fd, prog_fd; static bool test_finish; static void maps_create(void) { map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(uint32_t), sizeof(struct stats), 100, NULL); if (map_fd < 0) error(1, errno, "map create failed!\n"); } static void prog_load(void) { static char log_buf[1 << 16]; struct bpf_insn prog[] = { /* * Save sk_buff for future usage. value stored in R6 to R10 will * not be reset after a bpf helper function call. */ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), /* * pc1: BPF_FUNC_get_socket_cookie takes one parameter, * R1: sk_buff */ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_socket_cookie), /* pc2-4: save &socketCookie to r7 for future usage*/ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), /* * pc5-8: set up the registers for BPF_FUNC_map_lookup_elem, * it takes two parameters (R1: map_fd, R2: &socket_cookie) */ BPF_LD_MAP_FD(BPF_REG_1, map_fd), BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), /* * pc9. if r0 != 0x0, go to pc+14, since we have the cookie * stored already * Otherwise do pc10-22 to setup a new data entry. 
*/ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 14), BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_socket_uid), /* * Place a struct stats in the R10 stack and sequentially * place the member value into the memory. Packets value * is set by directly place a IMM value 1 into the stack. */ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32 + (__s16)offsetof(struct stats, uid)), BPF_ST_MEM(BPF_DW, BPF_REG_10, -32 + (__s16)offsetof(struct stats, packets), 1), /* * __sk_buff is a special struct used for eBPF program to * directly access some sk_buff field. */ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -32 + (__s16)offsetof(struct stats, bytes)), /* * add new map entry using BPF_FUNC_map_update_elem, it takes * 4 parameters (R1: map_fd, R2: &socket_cookie, R3: &stats, * R4: flags) */ BPF_LD_MAP_FD(BPF_REG_1, map_fd), BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -32), BPF_MOV64_IMM(BPF_REG_4, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), BPF_JMP_IMM(BPF_JA, 0, 0, 5), /* * pc24-30 update the packet info to a exist data entry, it can * be done by directly write to pointers instead of using * BPF_FUNC_map_update_elem helper function */ BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), BPF_MOV64_IMM(BPF_REG_1, 1), BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1, offsetof(struct stats, packets)), BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1, offsetof(struct stats, bytes)), BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct __sk_buff, len)), BPF_EXIT_INSN(), }; LIBBPF_OPTS(bpf_prog_load_opts, opts, .log_buf = log_buf, .log_size = sizeof(log_buf), ); prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", prog, ARRAY_SIZE(prog), &opts); if (prog_fd < 0) error(1, errno, "failed to load prog\n%s\n", 
log_buf); } static void prog_attach_iptables(char *file) { int ret; char rules[256]; if (bpf_obj_pin(prog_fd, file)) error(1, errno, "bpf_obj_pin"); if (strlen(file) > 50) { printf("file path too long: %s\n", file); exit(1); } ret = snprintf(rules, sizeof(rules), "iptables -A OUTPUT -m bpf --object-pinned %s -j ACCEPT", file); if (ret < 0 || ret >= sizeof(rules)) { printf("error constructing iptables command\n"); exit(1); } ret = system(rules); if (ret < 0) { printf("iptables rule update failed: %d/n", WEXITSTATUS(ret)); exit(1); } } static void print_table(void) { struct stats curEntry; uint32_t curN = UINT32_MAX; uint32_t nextN; int res; while (bpf_map_get_next_key(map_fd, &curN, &nextN) > -1) { curN = nextN; res = bpf_map_lookup_elem(map_fd, &curN, &curEntry); if (res < 0) { error(1, errno, "fail to get entry value of Key: %u\n", curN); } else { printf("cookie: %u, uid: 0x%x, Packet Count: %lu," " Bytes Count: %lu\n", curN, curEntry.uid, curEntry.packets, curEntry.bytes); } } } static void udp_client(void) { struct sockaddr_in si_other = {0}; struct sockaddr_in si_me = {0}; struct stats dataEntry; int s_rcv, s_send, i, recv_len; char message = 'a'; char buf; uint64_t cookie; int res; socklen_t cookie_len = sizeof(cookie); socklen_t slen = sizeof(si_other); s_rcv = socket(PF_INET, SOCK_DGRAM, 0); if (s_rcv < 0) error(1, errno, "rcv socket creat failed!\n"); si_other.sin_family = AF_INET; si_other.sin_port = htons(PORT); if (inet_aton("127.0.0.1", &si_other.sin_addr) == 0) error(1, errno, "inet_aton\n"); if (bind(s_rcv, (struct sockaddr *)&si_other, sizeof(si_other)) == -1) error(1, errno, "bind\n"); s_send = socket(PF_INET, SOCK_DGRAM, 0); if (s_send < 0) error(1, errno, "send socket creat failed!\n"); res = getsockopt(s_send, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len); if (res < 0) printf("get cookie failed: %s\n", strerror(errno)); res = bpf_map_lookup_elem(map_fd, &cookie, &dataEntry); if (res != -1) error(1, errno, "socket stat found while flow not 
active\n"); for (i = 0; i < 10; i++) { res = sendto(s_send, &message, sizeof(message), 0, (struct sockaddr *)&si_other, slen); if (res == -1) error(1, errno, "send\n"); if (res != sizeof(message)) error(1, 0, "%uB != %luB\n", res, sizeof(message)); recv_len = recvfrom(s_rcv, &buf, sizeof(buf), 0, (struct sockaddr *)&si_me, &slen); if (recv_len < 0) error(1, errno, "receive\n"); res = memcmp(&(si_other.sin_addr), &(si_me.sin_addr), sizeof(si_me.sin_addr)); if (res != 0) error(1, EFAULT, "sender addr error: %d\n", res); printf("Message received: %c\n", buf); res = bpf_map_lookup_elem(map_fd, &cookie, &dataEntry); if (res < 0) error(1, errno, "lookup sk stat failed, cookie: %lu\n", cookie); printf("cookie: %lu, uid: 0x%x, Packet Count: %lu," " Bytes Count: %lu\n\n", cookie, dataEntry.uid, dataEntry.packets, dataEntry.bytes); } close(s_send); close(s_rcv); } static int usage(void) { printf("Usage: ./run_cookie_uid_helper_example.sh" " bpfObjName -option\n" " -t traffic monitor test\n" " -s getsockopt cookie test\n"); return 1; } static void finish(int ret) { test_finish = true; } int main(int argc, char *argv[]) { int opt; bool cfg_test_traffic = false; bool cfg_test_cookie = false; if (argc != 3) return usage(); while ((opt = getopt(argc, argv, "ts")) != -1) { switch (opt) { case 't': cfg_test_traffic = true; break; case 's': cfg_test_cookie = true; break; default: printf("unknown option %c\n", opt); usage(); return -1; } } maps_create(); prog_load(); prog_attach_iptables(argv[2]); if (cfg_test_traffic) { if (signal(SIGINT, finish) == SIG_ERR) error(1, errno, "register SIGINT handler failed"); if (signal(SIGTERM, finish) == SIG_ERR) error(1, errno, "register SIGTERM handler failed"); while (!test_finish) { print_table(); printf("\n"); sleep(1); } } else if (cfg_test_cookie) { udp_client(); } close(prog_fd); close(map_fd); return 0; }
linux-master
samples/bpf/cookie_uid_helper_example.c
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2016 Facebook */ #include <linux/unistd.h> #include <linux/bpf.h> #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <string.h> #include <errno.h> #include <bpf/bpf.h> static void usage(void) { printf("Usage: tc_l2_ipip_redirect [...]\n"); printf(" -U <file> Update an already pinned BPF array\n"); printf(" -i <ifindex> Interface index\n"); printf(" -h Display this help\n"); } int main(int argc, char **argv) { const char *pinned_file = NULL; int ifindex = -1; int array_key = 0; int array_fd = -1; int ret = -1; int opt; while ((opt = getopt(argc, argv, "F:U:i:")) != -1) { switch (opt) { /* General args */ case 'U': pinned_file = optarg; break; case 'i': ifindex = atoi(optarg); break; default: usage(); goto out; } } if (ifindex < 0 || !pinned_file) { usage(); goto out; } array_fd = bpf_obj_get(pinned_file); if (array_fd < 0) { fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n", pinned_file, strerror(errno), errno); goto out; } /* bpf_tunnel_key.remote_ipv4 expects host byte orders */ ret = bpf_map_update_elem(array_fd, &array_key, &ifindex, 0); if (ret) { perror("bpf_map_update_elem"); goto out; } out: if (array_fd != -1) close(array_fd); return ret; }
linux-master
samples/bpf/tc_l2_redirect_user.c
/* Copyright (c) 2016 Thomas Graf <[email protected]> * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include "vmlinux.h" #include "net_shared.h" #include <bpf/bpf_helpers.h> #include <string.h> # define printk(fmt, ...) \ ({ \ char ____fmt[] = fmt; \ bpf_trace_printk(____fmt, sizeof(____fmt), \ ##__VA_ARGS__); \ }) #define CB_MAGIC 1234 /* Test: Pass all packets through */ SEC("nop") int do_nop(struct __sk_buff *skb) { return BPF_OK; } /* Test: Verify context information can be accessed */ SEC("test_ctx") int do_test_ctx(struct __sk_buff *skb) { skb->cb[0] = CB_MAGIC; printk("len %d hash %d protocol %d", skb->len, skb->hash, skb->protocol); printk("cb %d ingress_ifindex %d ifindex %d", skb->cb[0], skb->ingress_ifindex, skb->ifindex); return BPF_OK; } /* Test: Ensure skb->cb[] buffer is cleared */ SEC("test_cb") int do_test_cb(struct __sk_buff *skb) { printk("cb0: %x cb1: %x cb2: %x", skb->cb[0], skb->cb[1], skb->cb[2]); printk("cb3: %x cb4: %x", skb->cb[3], skb->cb[4]); return BPF_OK; } /* Test: Verify skb data can be read */ SEC("test_data") int do_test_data(struct __sk_buff *skb) { void *data = (void *)(long)skb->data; void *data_end = (void *)(long)skb->data_end; struct iphdr *iph = data; if (data + sizeof(*iph) > data_end) { printk("packet truncated"); return BPF_DROP; } printk("src: %x dst: %x", iph->saddr, iph->daddr); return BPF_OK; } #define IP_CSUM_OFF offsetof(struct iphdr, check) #define IP_DST_OFF offsetof(struct iphdr, daddr) #define IP_SRC_OFF offsetof(struct iphdr, saddr) #define IP_PROTO_OFF offsetof(struct iphdr, protocol) #define TCP_CSUM_OFF offsetof(struct tcphdr, 
check) #define UDP_CSUM_OFF offsetof(struct udphdr, check) #define IS_PSEUDO 0x10 static inline int rewrite(struct __sk_buff *skb, uint32_t old_ip, uint32_t new_ip, int rw_daddr) { int ret, off = 0, flags = IS_PSEUDO; uint8_t proto; ret = bpf_skb_load_bytes(skb, IP_PROTO_OFF, &proto, 1); if (ret < 0) { printk("bpf_l4_csum_replace failed: %d", ret); return BPF_DROP; } switch (proto) { case IPPROTO_TCP: off = TCP_CSUM_OFF; break; case IPPROTO_UDP: off = UDP_CSUM_OFF; flags |= BPF_F_MARK_MANGLED_0; break; case IPPROTO_ICMPV6: off = offsetof(struct icmp6hdr, icmp6_cksum); break; } if (off) { ret = bpf_l4_csum_replace(skb, off, old_ip, new_ip, flags | sizeof(new_ip)); if (ret < 0) { printk("bpf_l4_csum_replace failed: %d"); return BPF_DROP; } } ret = bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip)); if (ret < 0) { printk("bpf_l3_csum_replace failed: %d", ret); return BPF_DROP; } if (rw_daddr) ret = bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, sizeof(new_ip), 0); else ret = bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0); if (ret < 0) { printk("bpf_skb_store_bytes() failed: %d", ret); return BPF_DROP; } return BPF_OK; } /* Test: Verify skb data can be modified */ SEC("test_rewrite") int do_test_rewrite(struct __sk_buff *skb) { uint32_t old_ip, new_ip = 0x3fea8c0; int ret; ret = bpf_skb_load_bytes(skb, IP_DST_OFF, &old_ip, 4); if (ret < 0) { printk("bpf_skb_load_bytes failed: %d", ret); return BPF_DROP; } if (old_ip == 0x2fea8c0) { printk("out: rewriting from %x to %x", old_ip, new_ip); return rewrite(skb, old_ip, new_ip, 1); } return BPF_OK; } static inline int __do_push_ll_and_redirect(struct __sk_buff *skb) { uint64_t smac = SRC_MAC, dmac = DST_MAC; int ret, ifindex = DST_IFINDEX; struct ethhdr ehdr; ret = bpf_skb_change_head(skb, 14, 0); if (ret < 0) { printk("skb_change_head() failed: %d", ret); } ehdr.h_proto = bpf_htons(ETH_P_IP); memcpy(&ehdr.h_source, &smac, 6); memcpy(&ehdr.h_dest, &dmac, 6); ret = bpf_skb_store_bytes(skb, 
0, &ehdr, sizeof(ehdr), 0); if (ret < 0) { printk("skb_store_bytes() failed: %d", ret); return BPF_DROP; } return bpf_redirect(ifindex, 0); } SEC("push_ll_and_redirect_silent") int do_push_ll_and_redirect_silent(struct __sk_buff *skb) { return __do_push_ll_and_redirect(skb); } SEC("push_ll_and_redirect") int do_push_ll_and_redirect(struct __sk_buff *skb) { int ret, ifindex = DST_IFINDEX; ret = __do_push_ll_and_redirect(skb); if (ret >= 0) printk("redirected to %d", ifindex); return ret; } static inline void __fill_garbage(struct __sk_buff *skb) { uint64_t f = 0xFFFFFFFFFFFFFFFF; bpf_skb_store_bytes(skb, 0, &f, sizeof(f), 0); bpf_skb_store_bytes(skb, 8, &f, sizeof(f), 0); bpf_skb_store_bytes(skb, 16, &f, sizeof(f), 0); bpf_skb_store_bytes(skb, 24, &f, sizeof(f), 0); bpf_skb_store_bytes(skb, 32, &f, sizeof(f), 0); bpf_skb_store_bytes(skb, 40, &f, sizeof(f), 0); bpf_skb_store_bytes(skb, 48, &f, sizeof(f), 0); bpf_skb_store_bytes(skb, 56, &f, sizeof(f), 0); bpf_skb_store_bytes(skb, 64, &f, sizeof(f), 0); bpf_skb_store_bytes(skb, 72, &f, sizeof(f), 0); bpf_skb_store_bytes(skb, 80, &f, sizeof(f), 0); bpf_skb_store_bytes(skb, 88, &f, sizeof(f), 0); } SEC("fill_garbage") int do_fill_garbage(struct __sk_buff *skb) { __fill_garbage(skb); printk("Set initial 96 bytes of header to FF"); return BPF_OK; } SEC("fill_garbage_and_redirect") int do_fill_garbage_and_redirect(struct __sk_buff *skb) { int ifindex = DST_IFINDEX; __fill_garbage(skb); printk("redirected to %d", ifindex); return bpf_redirect(ifindex, 0); } /* Drop all packets */ SEC("drop_all") int do_drop_all(struct __sk_buff *skb) { printk("dropping with: %d", BPF_DROP); return BPF_DROP; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/test_lwt_bpf.c
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <unistd.h> #include <bpf/libbpf.h> #include "trace_helpers.h" int main(int ac, char **argv) { struct bpf_link *link = NULL; struct bpf_program *prog; struct bpf_object *obj; char filename[256]; FILE *f; snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); return 0; } prog = bpf_object__find_program_by_name(obj, "bpf_prog1"); if (!prog) { fprintf(stderr, "ERROR: finding a prog in obj file failed\n"); goto cleanup; } /* load BPF program */ if (bpf_object__load(obj)) { fprintf(stderr, "ERROR: loading BPF object file failed\n"); goto cleanup; } link = bpf_program__attach(prog); if (libbpf_get_error(link)) { fprintf(stderr, "ERROR: bpf_program__attach failed\n"); link = NULL; goto cleanup; } f = popen("taskset 1 ping -c5 localhost", "r"); (void) f; read_trace_pipe(); cleanup: bpf_link__destroy(link); bpf_object__close(obj); return 0; }
linux-master
samples/bpf/tracex1_user.c
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2017 Facebook */ #include <stdio.h> #include <unistd.h> #include <fcntl.h> #include <stdlib.h> #include <string.h> #include <linux/perf_event.h> #include <errno.h> #include <bpf/libbpf.h> #include <bpf/bpf.h> /* This program verifies bpf attachment to tracepoint sys_enter_* and sys_exit_*. * This requires kernel CONFIG_FTRACE_SYSCALLS to be set. */ static void usage(const char *cmd) { printf("USAGE: %s [-i num_progs] [-h]\n", cmd); printf(" -i num_progs # number of progs of the test\n"); printf(" -h # help\n"); } static void verify_map(int map_id) { __u32 key = 0; __u32 val; if (bpf_map_lookup_elem(map_id, &key, &val) != 0) { fprintf(stderr, "map_lookup failed: %s\n", strerror(errno)); return; } if (val == 0) { fprintf(stderr, "failed: map #%d returns value 0\n", map_id); return; } printf("verify map:%d val: %d\n", map_id, val); val = 0; if (bpf_map_update_elem(map_id, &key, &val, BPF_ANY) != 0) { fprintf(stderr, "map_update failed: %s\n", strerror(errno)); return; } } static int test(char *filename, int num_progs) { int map0_fds[num_progs], map1_fds[num_progs], fd, i, j = 0; struct bpf_link *links[num_progs * 4]; struct bpf_object *objs[num_progs]; struct bpf_program *prog; for (i = 0; i < num_progs; i++) { objs[i] = bpf_object__open_file(filename, NULL); if (libbpf_get_error(objs[i])) { fprintf(stderr, "opening BPF object file failed\n"); objs[i] = NULL; goto cleanup; } /* load BPF program */ if (bpf_object__load(objs[i])) { fprintf(stderr, "loading BPF object file failed\n"); goto cleanup; } map0_fds[i] = bpf_object__find_map_fd_by_name(objs[i], "enter_open_map"); map1_fds[i] = bpf_object__find_map_fd_by_name(objs[i], "exit_open_map"); if (map0_fds[i] < 0 || map1_fds[i] < 0) { fprintf(stderr, "finding a map in obj file failed\n"); goto cleanup; } bpf_object__for_each_program(prog, objs[i]) { links[j] = bpf_program__attach(prog); if (libbpf_get_error(links[j])) { fprintf(stderr, "bpf_program__attach 
failed\n"); links[j] = NULL; goto cleanup; } j++; } printf("prog #%d: map ids %d %d\n", i, map0_fds[i], map1_fds[i]); } /* current load_bpf_file has perf_event_open default pid = -1 * and cpu = 0, which permits attached bpf execution on * all cpus for all pid's. bpf program execution ignores * cpu affinity. */ /* trigger some "open" operations */ fd = open(filename, O_RDONLY); if (fd < 0) { fprintf(stderr, "open failed: %s\n", strerror(errno)); return 1; } close(fd); /* verify the map */ for (i = 0; i < num_progs; i++) { verify_map(map0_fds[i]); verify_map(map1_fds[i]); } cleanup: for (j--; j >= 0; j--) bpf_link__destroy(links[j]); for (i--; i >= 0; i--) bpf_object__close(objs[i]); return 0; } int main(int argc, char **argv) { int opt, num_progs = 1; char filename[256]; while ((opt = getopt(argc, argv, "i:h")) != -1) { switch (opt) { case 'i': num_progs = atoi(optarg); break; case 'h': default: usage(argv[0]); return 0; } } snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); return test(filename, num_progs); }
linux-master
samples/bpf/syscall_tp_user.c
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2016 Facebook */ #define _GNU_SOURCE #include <sched.h> #include <stdio.h> #include <sys/types.h> #include <asm/unistd.h> #include <unistd.h> #include <assert.h> #include <sys/wait.h> #include <stdlib.h> #include <signal.h> #include <string.h> #include <time.h> #include <arpa/inet.h> #include <errno.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #define TEST_BIT(t) (1U << (t)) #define MAX_NR_CPUS 1024 static __u64 time_get_ns(void) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ts.tv_sec * 1000000000ull + ts.tv_nsec; } enum test_type { HASH_PREALLOC, PERCPU_HASH_PREALLOC, HASH_KMALLOC, PERCPU_HASH_KMALLOC, LRU_HASH_PREALLOC, NOCOMMON_LRU_HASH_PREALLOC, LPM_KMALLOC, HASH_LOOKUP, ARRAY_LOOKUP, INNER_LRU_HASH_PREALLOC, LRU_HASH_LOOKUP, NR_TESTS, }; const char *test_map_names[NR_TESTS] = { [HASH_PREALLOC] = "hash_map", [PERCPU_HASH_PREALLOC] = "percpu_hash_map", [HASH_KMALLOC] = "hash_map_alloc", [PERCPU_HASH_KMALLOC] = "percpu_hash_map_alloc", [LRU_HASH_PREALLOC] = "lru_hash_map", [NOCOMMON_LRU_HASH_PREALLOC] = "nocommon_lru_hash_map", [LPM_KMALLOC] = "lpm_trie_map_alloc", [HASH_LOOKUP] = "hash_map", [ARRAY_LOOKUP] = "array_map", [INNER_LRU_HASH_PREALLOC] = "inner_lru_hash_map", [LRU_HASH_LOOKUP] = "lru_hash_lookup_map", }; enum map_idx { array_of_lru_hashs_idx, hash_map_alloc_idx, lru_hash_lookup_idx, NR_IDXES, }; static int map_fd[NR_IDXES]; static int test_flags = ~0; static uint32_t num_map_entries; static uint32_t inner_lru_hash_size; static int lru_hash_lookup_test_entries = 32; static uint32_t max_cnt = 10000; static int check_test_flags(enum test_type t) { return test_flags & TEST_BIT(t); } static void test_hash_prealloc(int cpu) { __u64 start_time; int i; start_time = time_get_ns(); for (i = 0; i < max_cnt; i++) syscall(__NR_getuid); printf("%d:hash_map_perf pre-alloc %lld events per sec\n", cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); } static int 
pre_test_lru_hash_lookup(int tasks) { int fd = map_fd[lru_hash_lookup_idx]; uint32_t key; long val = 1; int ret; if (num_map_entries > lru_hash_lookup_test_entries) lru_hash_lookup_test_entries = num_map_entries; /* Populate the lru_hash_map for LRU_HASH_LOOKUP perf test. * * It is fine that the user requests for a map with * num_map_entries < 32 and some of the later lru hash lookup * may return not found. For LRU map, we are not interested * in such small map performance. */ for (key = 0; key < lru_hash_lookup_test_entries; key++) { ret = bpf_map_update_elem(fd, &key, &val, BPF_NOEXIST); if (ret) return ret; } return 0; } static void do_test_lru(enum test_type test, int cpu) { static int inner_lru_map_fds[MAX_NR_CPUS]; struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 }; const char *test_name; __u64 start_time; int i, ret; if (test == INNER_LRU_HASH_PREALLOC && cpu) { /* If CPU is not 0, create inner_lru hash map and insert the fd * value into the array_of_lru_hash map. In case of CPU 0, * 'inner_lru_hash_map' was statically inserted on the map init */ int outer_fd = map_fd[array_of_lru_hashs_idx]; unsigned int mycpu, mynode; LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NUMA_NODE, ); assert(cpu < MAX_NR_CPUS); ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL); assert(!ret); opts.numa_node = mynode; inner_lru_map_fds[cpu] = bpf_map_create(BPF_MAP_TYPE_LRU_HASH, test_map_names[INNER_LRU_HASH_PREALLOC], sizeof(uint32_t), sizeof(long), inner_lru_hash_size, &opts); if (inner_lru_map_fds[cpu] == -1) { printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n", strerror(errno), errno); exit(1); } ret = bpf_map_update_elem(outer_fd, &cpu, &inner_lru_map_fds[cpu], BPF_ANY); if (ret) { printf("cannot update ARRAY_OF_LRU_HASHS with key:%u. 
%s(%d)\n", cpu, strerror(errno), errno); exit(1); } } in6.sin6_addr.s6_addr16[0] = 0xdead; in6.sin6_addr.s6_addr16[1] = 0xbeef; if (test == LRU_HASH_PREALLOC) { test_name = "lru_hash_map_perf"; in6.sin6_addr.s6_addr16[2] = 0; } else if (test == NOCOMMON_LRU_HASH_PREALLOC) { test_name = "nocommon_lru_hash_map_perf"; in6.sin6_addr.s6_addr16[2] = 1; } else if (test == INNER_LRU_HASH_PREALLOC) { test_name = "inner_lru_hash_map_perf"; in6.sin6_addr.s6_addr16[2] = 2; } else if (test == LRU_HASH_LOOKUP) { test_name = "lru_hash_lookup_perf"; in6.sin6_addr.s6_addr16[2] = 3; in6.sin6_addr.s6_addr32[3] = 0; } else { assert(0); } start_time = time_get_ns(); for (i = 0; i < max_cnt; i++) { ret = connect(-1, (const struct sockaddr *)&in6, sizeof(in6)); assert(ret == -1 && errno == EBADF); if (in6.sin6_addr.s6_addr32[3] < lru_hash_lookup_test_entries - 32) in6.sin6_addr.s6_addr32[3] += 32; else in6.sin6_addr.s6_addr32[3] = 0; } printf("%d:%s pre-alloc %lld events per sec\n", cpu, test_name, max_cnt * 1000000000ll / (time_get_ns() - start_time)); } static void test_lru_hash_prealloc(int cpu) { do_test_lru(LRU_HASH_PREALLOC, cpu); } static void test_nocommon_lru_hash_prealloc(int cpu) { do_test_lru(NOCOMMON_LRU_HASH_PREALLOC, cpu); } static void test_inner_lru_hash_prealloc(int cpu) { do_test_lru(INNER_LRU_HASH_PREALLOC, cpu); } static void test_lru_hash_lookup(int cpu) { do_test_lru(LRU_HASH_LOOKUP, cpu); } static void test_percpu_hash_prealloc(int cpu) { __u64 start_time; int i; start_time = time_get_ns(); for (i = 0; i < max_cnt; i++) syscall(__NR_geteuid); printf("%d:percpu_hash_map_perf pre-alloc %lld events per sec\n", cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); } static void test_hash_kmalloc(int cpu) { __u64 start_time; int i; start_time = time_get_ns(); for (i = 0; i < max_cnt; i++) syscall(__NR_getgid); printf("%d:hash_map_perf kmalloc %lld events per sec\n", cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); } static void 
test_percpu_hash_kmalloc(int cpu) { __u64 start_time; int i; start_time = time_get_ns(); for (i = 0; i < max_cnt; i++) syscall(__NR_getegid); printf("%d:percpu_hash_map_perf kmalloc %lld events per sec\n", cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); } static void test_lpm_kmalloc(int cpu) { __u64 start_time; int i; start_time = time_get_ns(); for (i = 0; i < max_cnt; i++) syscall(__NR_gettid); printf("%d:lpm_perf kmalloc %lld events per sec\n", cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time)); } static void test_hash_lookup(int cpu) { __u64 start_time; int i; start_time = time_get_ns(); for (i = 0; i < max_cnt; i++) syscall(__NR_getpgid, 0); printf("%d:hash_lookup %lld lookups per sec\n", cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time)); } static void test_array_lookup(int cpu) { __u64 start_time; int i; start_time = time_get_ns(); for (i = 0; i < max_cnt; i++) syscall(__NR_getppid, 0); printf("%d:array_lookup %lld lookups per sec\n", cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time)); } typedef int (*pre_test_func)(int tasks); const pre_test_func pre_test_funcs[] = { [LRU_HASH_LOOKUP] = pre_test_lru_hash_lookup, }; typedef void (*test_func)(int cpu); const test_func test_funcs[] = { [HASH_PREALLOC] = test_hash_prealloc, [PERCPU_HASH_PREALLOC] = test_percpu_hash_prealloc, [HASH_KMALLOC] = test_hash_kmalloc, [PERCPU_HASH_KMALLOC] = test_percpu_hash_kmalloc, [LRU_HASH_PREALLOC] = test_lru_hash_prealloc, [NOCOMMON_LRU_HASH_PREALLOC] = test_nocommon_lru_hash_prealloc, [LPM_KMALLOC] = test_lpm_kmalloc, [HASH_LOOKUP] = test_hash_lookup, [ARRAY_LOOKUP] = test_array_lookup, [INNER_LRU_HASH_PREALLOC] = test_inner_lru_hash_prealloc, [LRU_HASH_LOOKUP] = test_lru_hash_lookup, }; static int pre_test(int tasks) { int i; for (i = 0; i < NR_TESTS; i++) { if (pre_test_funcs[i] && check_test_flags(i)) { int ret = pre_test_funcs[i](tasks); if (ret) return ret; } } return 0; } static void loop(int cpu) { cpu_set_t cpuset; int 
i; CPU_ZERO(&cpuset); CPU_SET(cpu, &cpuset); sched_setaffinity(0, sizeof(cpuset), &cpuset); for (i = 0; i < NR_TESTS; i++) { if (check_test_flags(i)) test_funcs[i](cpu); } } static void run_perf_test(int tasks) { pid_t pid[tasks]; int i; assert(!pre_test(tasks)); for (i = 0; i < tasks; i++) { pid[i] = fork(); if (pid[i] == 0) { loop(i); exit(0); } else if (pid[i] == -1) { printf("couldn't spawn #%d process\n", i); exit(1); } } for (i = 0; i < tasks; i++) { int status; assert(waitpid(pid[i], &status, 0) == pid[i]); assert(status == 0); } } static void fill_lpm_trie(void) { struct bpf_lpm_trie_key *key; unsigned long value = 0; unsigned int i; int r; key = alloca(sizeof(*key) + 4); key->prefixlen = 32; for (i = 0; i < 512; ++i) { key->prefixlen = rand() % 33; key->data[0] = rand() & 0xff; key->data[1] = rand() & 0xff; key->data[2] = rand() & 0xff; key->data[3] = rand() & 0xff; r = bpf_map_update_elem(map_fd[hash_map_alloc_idx], key, &value, 0); assert(!r); } key->prefixlen = 32; key->data[0] = 192; key->data[1] = 168; key->data[2] = 0; key->data[3] = 1; value = 128; r = bpf_map_update_elem(map_fd[hash_map_alloc_idx], key, &value, 0); assert(!r); } static void fixup_map(struct bpf_object *obj) { struct bpf_map *map; int i; bpf_object__for_each_map(map, obj) { const char *name = bpf_map__name(map); /* Only change the max_entries for the enabled test(s) */ for (i = 0; i < NR_TESTS; i++) { if (!strcmp(test_map_names[i], name) && (check_test_flags(i))) { bpf_map__set_max_entries(map, num_map_entries); continue; } } } inner_lru_hash_size = num_map_entries; } int main(int argc, char **argv) { int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); struct bpf_link *links[8]; struct bpf_program *prog; struct bpf_object *obj; struct bpf_map *map; char filename[256]; int i = 0; if (argc > 1) test_flags = atoi(argv[1]) ? : test_flags; if (argc > 2) nr_cpus = atoi(argv[2]) ? 
: nr_cpus; if (argc > 3) num_map_entries = atoi(argv[3]); if (argc > 4) max_cnt = atoi(argv[4]); snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); return 0; } map = bpf_object__find_map_by_name(obj, "inner_lru_hash_map"); if (libbpf_get_error(map)) { fprintf(stderr, "ERROR: finding a map in obj file failed\n"); goto cleanup; } inner_lru_hash_size = bpf_map__max_entries(map); if (!inner_lru_hash_size) { fprintf(stderr, "ERROR: failed to get map attribute\n"); goto cleanup; } /* resize BPF map prior to loading */ if (num_map_entries > 0) fixup_map(obj); /* load BPF program */ if (bpf_object__load(obj)) { fprintf(stderr, "ERROR: loading BPF object file failed\n"); goto cleanup; } map_fd[0] = bpf_object__find_map_fd_by_name(obj, "array_of_lru_hashs"); map_fd[1] = bpf_object__find_map_fd_by_name(obj, "hash_map_alloc"); map_fd[2] = bpf_object__find_map_fd_by_name(obj, "lru_hash_lookup_map"); if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0) { fprintf(stderr, "ERROR: finding a map in obj file failed\n"); goto cleanup; } bpf_object__for_each_program(prog, obj) { links[i] = bpf_program__attach(prog); if (libbpf_get_error(links[i])) { fprintf(stderr, "ERROR: bpf_program__attach failed\n"); links[i] = NULL; goto cleanup; } i++; } fill_lpm_trie(); run_perf_test(nr_cpus); cleanup: for (i--; i >= 0; i--) bpf_link__destroy(links[i]); bpf_object__close(obj); return 0; }
linux-master
samples/bpf/map_perf_test_user.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * Sample Host Bandwidth Manager (HBM) BPF program. * * A cgroup skb BPF egress program to limit cgroup output bandwidth. * It uses a modified virtual token bucket queue to limit average * egress bandwidth. The implementation uses credits instead of tokens. * Negative credits imply that queueing would have happened (this is * a virtual queue, so no queueing is done by it. However, queueing may * occur at the actual qdisc (which is not used for rate limiting). * * This implementation uses 3 thresholds, one to start marking packets and * the other two to drop packets: * CREDIT * - <--------------------------|------------------------> + * | | | 0 * | Large pkt | * | drop thresh | * Small pkt drop Mark threshold * thresh * * The effect of marking depends on the type of packet: * a) If the packet is ECN enabled and it is a TCP packet, then the packet * is ECN marked. * b) If the packet is a TCP packet, then we probabilistically call tcp_cwr * to reduce the congestion window. The current implementation uses a linear * distribution (0% probability at marking threshold, 100% probability * at drop threshold). * c) If the packet is not a TCP packet, then it is dropped. * * If the credit is below the drop threshold, the packet is dropped. If it * is a TCP packet, then it also calls tcp_cwr since packets dropped by * by a cgroup skb BPF program do not automatically trigger a call to * tcp_cwr in the current kernel code. * * This BPF program actually uses 2 drop thresholds, one threshold * for larger packets (>= 120 bytes) and another for smaller packets. This * protects smaller packets such as SYNs, ACKs, etc. 
* * The default bandwidth limit is set at 1Gbps but this can be changed by * a user program through a shared BPF map. In addition, by default this BPF * program does not limit connections using loopback. This behavior can be * overwritten by the user program. There is also an option to calculate * some statistics, such as percent of packets marked or dropped, which * the user program can access. * * A latter patch provides such a program (hbm.c) */ #include "hbm_kern.h" SEC("cgroup_skb/egress") int _hbm_out_cg(struct __sk_buff *skb) { struct hbm_pkt_info pkti; int len = skb->len; unsigned int queue_index = 0; unsigned long long curtime; int credit; signed long long delta = 0, new_credit; int max_credit = MAX_CREDIT; bool congestion_flag = false; bool drop_flag = false; bool cwr_flag = false; bool ecn_ce_flag = false; struct hbm_vqueue *qdp; struct hbm_queue_stats *qsp = NULL; int rv = ALLOW_PKT; qsp = bpf_map_lookup_elem(&queue_stats, &queue_index); if (qsp != NULL && !qsp->loopback && (skb->ifindex == 1)) return ALLOW_PKT; hbm_get_pkt_info(skb, &pkti); // We may want to account for the length of headers in len // calculation, like ETH header + overhead, specially if it // is a gso packet. But I am not doing it right now. 
qdp = bpf_get_local_storage(&queue_state, 0); if (!qdp) return ALLOW_PKT; else if (qdp->lasttime == 0) hbm_init_vqueue(qdp, 1024); curtime = bpf_ktime_get_ns(); // Begin critical section bpf_spin_lock(&qdp->lock); credit = qdp->credit; delta = curtime - qdp->lasttime; /* delta < 0 implies that another process with a curtime greater * than ours beat us to the critical section and already added * the new credit, so we should not add it ourselves */ if (delta > 0) { qdp->lasttime = curtime; new_credit = credit + CREDIT_PER_NS(delta, qdp->rate); if (new_credit > MAX_CREDIT) credit = MAX_CREDIT; else credit = new_credit; } credit -= len; qdp->credit = credit; bpf_spin_unlock(&qdp->lock); // End critical section // Check if we should update rate if (qsp != NULL && (qsp->rate * 128) != qdp->rate) { qdp->rate = qsp->rate * 128; bpf_printk("Updating rate: %d (1sec:%llu bits)\n", (int)qdp->rate, CREDIT_PER_NS(1000000000, qdp->rate) * 8); } // Set flags (drop, congestion, cwr) // Dropping => we are congested, so ignore congestion flag if (credit < -DROP_THRESH || (len > LARGE_PKT_THRESH && credit < -LARGE_PKT_DROP_THRESH)) { // Very congested, set drop packet drop_flag = true; if (pkti.ecn) congestion_flag = true; else if (pkti.is_tcp) cwr_flag = true; } else if (credit < 0) { // Congested, set congestion flag if (pkti.ecn || pkti.is_tcp) { if (credit < -MARK_THRESH) congestion_flag = true; else congestion_flag = false; } else { congestion_flag = true; } } if (congestion_flag) { if (bpf_skb_ecn_set_ce(skb)) { ecn_ce_flag = true; } else { if (pkti.is_tcp) { unsigned int rand = bpf_get_prandom_u32(); if (-credit >= MARK_THRESH + (rand % MARK_REGION_SIZE)) { // Do congestion control cwr_flag = true; } } else if (len > LARGE_PKT_THRESH) { // Problem if too many small packets? 
drop_flag = true; } } } if (qsp != NULL) if (qsp->no_cn) cwr_flag = false; hbm_update_stats(qsp, len, curtime, congestion_flag, drop_flag, cwr_flag, ecn_ce_flag, &pkti, credit); if (drop_flag) { __sync_add_and_fetch(&(qdp->credit), len); rv = DROP_PKT; } if (cwr_flag) rv |= 2; return rv; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/hbm_out_kern.c
/* Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #define KBUILD_MODNAME "foo" #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/in.h> #include <linux/tcp.h> #include <linux/udp.h> #include <uapi/linux/bpf.h> #include <bpf/bpf_helpers.h> #include "bpf_legacy.h" #define DEFAULT_PKTGEN_UDP_PORT 9 #define IP_MF 0x2000 #define IP_OFFSET 0x1FFF static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff) { return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off)) & (IP_MF | IP_OFFSET); } SEC("ldabs") int handle_ingress(struct __sk_buff *skb) { __u64 troff = ETH_HLEN + sizeof(struct iphdr); if (load_half(skb, offsetof(struct ethhdr, h_proto)) != ETH_P_IP) return 0; if (load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)) != IPPROTO_UDP || load_byte(skb, ETH_HLEN) != 0x45) return 0; if (ip_is_fragment(skb, ETH_HLEN)) return 0; if (load_half(skb, troff + offsetof(struct udphdr, dest)) == DEFAULT_PKTGEN_UDP_PORT) return TC_ACT_SHOT; return 0; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/parse_ldabs.c
/* eBPF example program: * * - Loads eBPF program * * The eBPF program sets the sk_bound_dev_if index in new AF_INET{6} * sockets opened by processes in the cgroup. * * - Attaches the new program to a cgroup using BPF_PROG_ATTACH */ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include <unistd.h> #include <assert.h> #include <errno.h> #include <fcntl.h> #include <net/if.h> #include <inttypes.h> #include <linux/bpf.h> #include <bpf/bpf.h> #include "bpf_insn.h" char bpf_log_buf[BPF_LOG_BUF_SIZE]; static int prog_load(__u32 idx, __u32 mark, __u32 prio) { /* save pointer to context */ struct bpf_insn prog_start[] = { BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), }; struct bpf_insn prog_end[] = { BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = verdict */ BPF_EXIT_INSN(), }; /* set sk_bound_dev_if on socket */ struct bpf_insn prog_dev[] = { BPF_MOV64_IMM(BPF_REG_3, idx), BPF_MOV64_IMM(BPF_REG_2, offsetof(struct bpf_sock, bound_dev_if)), BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, offsetof(struct bpf_sock, bound_dev_if)), }; /* set mark on socket */ struct bpf_insn prog_mark[] = { /* get uid of process */ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_current_uid_gid), BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffffffff), /* if uid is 0, use given mark, else use the uid as the mark */ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), BPF_MOV64_IMM(BPF_REG_3, mark), /* set the mark on the new socket */ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), BPF_MOV64_IMM(BPF_REG_2, offsetof(struct bpf_sock, mark)), BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, offsetof(struct bpf_sock, mark)), }; /* set priority on socket */ struct bpf_insn prog_prio[] = { BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), BPF_MOV64_IMM(BPF_REG_3, prio), BPF_MOV64_IMM(BPF_REG_2, offsetof(struct bpf_sock, priority)), BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, offsetof(struct bpf_sock, priority)), }; LIBBPF_OPTS(bpf_prog_load_opts, opts, .log_buf = bpf_log_buf, .log_size = 
BPF_LOG_BUF_SIZE, ); struct bpf_insn *prog; size_t insns_cnt; void *p; int ret; insns_cnt = sizeof(prog_start) + sizeof(prog_end); if (idx) insns_cnt += sizeof(prog_dev); if (mark) insns_cnt += sizeof(prog_mark); if (prio) insns_cnt += sizeof(prog_prio); p = prog = malloc(insns_cnt); if (!prog) { fprintf(stderr, "Failed to allocate memory for instructions\n"); return EXIT_FAILURE; } memcpy(p, prog_start, sizeof(prog_start)); p += sizeof(prog_start); if (idx) { memcpy(p, prog_dev, sizeof(prog_dev)); p += sizeof(prog_dev); } if (mark) { memcpy(p, prog_mark, sizeof(prog_mark)); p += sizeof(prog_mark); } if (prio) { memcpy(p, prog_prio, sizeof(prog_prio)); p += sizeof(prog_prio); } memcpy(p, prog_end, sizeof(prog_end)); p += sizeof(prog_end); insns_cnt /= sizeof(struct bpf_insn); ret = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", prog, insns_cnt, &opts); free(prog); return ret; } static int get_bind_to_device(int sd, char *name, size_t len) { socklen_t optlen = len; int rc; name[0] = '\0'; rc = getsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, name, &optlen); if (rc < 0) perror("setsockopt(SO_BINDTODEVICE)"); return rc; } static unsigned int get_somark(int sd) { unsigned int mark = 0; socklen_t optlen = sizeof(mark); int rc; rc = getsockopt(sd, SOL_SOCKET, SO_MARK, &mark, &optlen); if (rc < 0) perror("getsockopt(SO_MARK)"); return mark; } static unsigned int get_priority(int sd) { unsigned int prio = 0; socklen_t optlen = sizeof(prio); int rc; rc = getsockopt(sd, SOL_SOCKET, SO_PRIORITY, &prio, &optlen); if (rc < 0) perror("getsockopt(SO_PRIORITY)"); return prio; } static int show_sockopts(int family) { unsigned int mark, prio; char name[16]; int sd; sd = socket(family, SOCK_DGRAM, 17); if (sd < 0) { perror("socket"); return 1; } if (get_bind_to_device(sd, name, sizeof(name)) < 0) return 1; mark = get_somark(sd); prio = get_priority(sd); close(sd); printf("sd %d: dev %s, mark %u, priority %u\n", sd, name, mark, prio); return 0; } static int usage(const char *argv0) 
{ printf("Usage:\n"); printf(" Attach a program\n"); printf(" %s -b bind-to-dev -m mark -p prio cg-path\n", argv0); printf("\n"); printf(" Detach a program\n"); printf(" %s -d cg-path\n", argv0); printf("\n"); printf(" Show inherited socket settings (mark, priority, and device)\n"); printf(" %s [-6]\n", argv0); return EXIT_FAILURE; } int main(int argc, char **argv) { __u32 idx = 0, mark = 0, prio = 0; const char *cgrp_path = NULL; int cg_fd, prog_fd, ret; int family = PF_INET; int do_attach = 1; int rc; while ((rc = getopt(argc, argv, "db:m:p:6")) != -1) { switch (rc) { case 'd': do_attach = 0; break; case 'b': idx = if_nametoindex(optarg); if (!idx) { idx = strtoumax(optarg, NULL, 0); if (!idx) { printf("Invalid device name\n"); return EXIT_FAILURE; } } break; case 'm': mark = strtoumax(optarg, NULL, 0); break; case 'p': prio = strtoumax(optarg, NULL, 0); break; case '6': family = PF_INET6; break; default: return usage(argv[0]); } } if (optind == argc) return show_sockopts(family); cgrp_path = argv[optind]; if (!cgrp_path) { fprintf(stderr, "cgroup path not given\n"); return EXIT_FAILURE; } if (do_attach && !idx && !mark && !prio) { fprintf(stderr, "One of device, mark or priority must be given\n"); return EXIT_FAILURE; } cg_fd = open(cgrp_path, O_DIRECTORY | O_RDONLY); if (cg_fd < 0) { printf("Failed to open cgroup path: '%s'\n", strerror(errno)); return EXIT_FAILURE; } if (do_attach) { prog_fd = prog_load(idx, mark, prio); if (prog_fd < 0) { printf("Failed to load prog: '%s'\n", strerror(errno)); printf("Output from kernel verifier:\n%s\n-------\n", bpf_log_buf); return EXIT_FAILURE; } ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE, 0); if (ret < 0) { printf("Failed to attach prog to cgroup: '%s'\n", strerror(errno)); return EXIT_FAILURE; } } else { ret = bpf_prog_detach(cg_fd, BPF_CGROUP_INET_SOCK_CREATE); if (ret < 0) { printf("Failed to detach prog from cgroup: '%s'\n", strerror(errno)); return EXIT_FAILURE; } } close(cg_fd); return 
EXIT_SUCCESS; }
linux-master
samples/bpf/test_cgrp2_sock.c
/* Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program shows how to use bpf_xdp_adjust_head() by * encapsulating the incoming packet in an IPv4/v6 header * and then XDP_TX it out. */ #define KBUILD_MODNAME "foo" #include <uapi/linux/bpf.h> #include <linux/in.h> #include <linux/if_ether.h> #include <linux/if_packet.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <bpf/bpf_helpers.h> #include "xdp_tx_iptunnel_common.h" struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __type(key, __u32); __type(value, __u64); __uint(max_entries, 256); } rxcnt SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_HASH); __type(key, struct vip); __type(value, struct iptnl_info); __uint(max_entries, MAX_IPTNL_ENTRIES); } vip2tnl SEC(".maps"); static __always_inline void count_tx(u32 protocol) { u64 *rxcnt_count; rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol); if (rxcnt_count) *rxcnt_count += 1; } static __always_inline int get_dport(void *trans_data, void *data_end, u8 protocol) { struct tcphdr *th; struct udphdr *uh; switch (protocol) { case IPPROTO_TCP: th = (struct tcphdr *)trans_data; if (th + 1 > data_end) return -1; return th->dest; case IPPROTO_UDP: uh = (struct udphdr *)trans_data; if (uh + 1 > data_end) return -1; return uh->dest; default: return 0; } } static __always_inline void set_ethhdr(struct ethhdr *new_eth, const struct ethhdr *old_eth, const struct iptnl_info *tnl, __be16 h_proto) { memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source)); memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest)); new_eth->h_proto = h_proto; } static __always_inline int handle_ipv4(struct xdp_md *xdp) { void *data_end = (void *)(long)xdp->data_end; void *data = (void *)(long)xdp->data; struct iptnl_info *tnl; struct ethhdr *new_eth; struct ethhdr *old_eth; 
struct iphdr *iph = data + sizeof(struct ethhdr); u16 *next_iph_u16; u16 payload_len; struct vip vip = {}; int dport; u32 csum = 0; int i; if (iph + 1 > data_end) return XDP_DROP; dport = get_dport(iph + 1, data_end, iph->protocol); if (dport == -1) return XDP_DROP; vip.protocol = iph->protocol; vip.family = AF_INET; vip.daddr.v4 = iph->daddr; vip.dport = dport; payload_len = ntohs(iph->tot_len); tnl = bpf_map_lookup_elem(&vip2tnl, &vip); /* It only does v4-in-v4 */ if (!tnl || tnl->family != AF_INET) return XDP_PASS; /* The vip key is found. Add an IP header and send it out */ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr))) return XDP_DROP; data = (void *)(long)xdp->data; data_end = (void *)(long)xdp->data_end; new_eth = data; iph = data + sizeof(*new_eth); old_eth = data + sizeof(*iph); if (new_eth + 1 > data_end || old_eth + 1 > data_end || iph + 1 > data_end) return XDP_DROP; set_ethhdr(new_eth, old_eth, tnl, htons(ETH_P_IP)); iph->version = 4; iph->ihl = sizeof(*iph) >> 2; iph->frag_off = 0; iph->protocol = IPPROTO_IPIP; iph->check = 0; iph->tos = 0; iph->tot_len = htons(payload_len + sizeof(*iph)); iph->daddr = tnl->daddr.v4; iph->saddr = tnl->saddr.v4; iph->ttl = 8; next_iph_u16 = (u16 *)iph; #pragma clang loop unroll(full) for (i = 0; i < sizeof(*iph) >> 1; i++) csum += *next_iph_u16++; iph->check = ~((csum & 0xffff) + (csum >> 16)); count_tx(vip.protocol); return XDP_TX; } static __always_inline int handle_ipv6(struct xdp_md *xdp) { void *data_end = (void *)(long)xdp->data_end; void *data = (void *)(long)xdp->data; struct iptnl_info *tnl; struct ethhdr *new_eth; struct ethhdr *old_eth; struct ipv6hdr *ip6h = data + sizeof(struct ethhdr); __u16 payload_len; struct vip vip = {}; int dport; if (ip6h + 1 > data_end) return XDP_DROP; dport = get_dport(ip6h + 1, data_end, ip6h->nexthdr); if (dport == -1) return XDP_DROP; vip.protocol = ip6h->nexthdr; vip.family = AF_INET6; memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr)); vip.dport = 
dport; payload_len = ip6h->payload_len; tnl = bpf_map_lookup_elem(&vip2tnl, &vip); /* It only does v6-in-v6 */ if (!tnl || tnl->family != AF_INET6) return XDP_PASS; /* The vip key is found. Add an IP header and send it out */ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr))) return XDP_DROP; data = (void *)(long)xdp->data; data_end = (void *)(long)xdp->data_end; new_eth = data; ip6h = data + sizeof(*new_eth); old_eth = data + sizeof(*ip6h); if (new_eth + 1 > data_end || old_eth + 1 > data_end || ip6h + 1 > data_end) return XDP_DROP; set_ethhdr(new_eth, old_eth, tnl, htons(ETH_P_IPV6)); ip6h->version = 6; ip6h->priority = 0; memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl)); ip6h->payload_len = htons(ntohs(payload_len) + sizeof(*ip6h)); ip6h->nexthdr = IPPROTO_IPV6; ip6h->hop_limit = 8; memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6)); memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6)); count_tx(vip.protocol); return XDP_TX; } SEC("xdp.frags") int _xdp_tx_iptunnel(struct xdp_md *xdp) { void *data_end = (void *)(long)xdp->data_end; void *data = (void *)(long)xdp->data; struct ethhdr *eth = data; __u16 h_proto; if (eth + 1 > data_end) return XDP_DROP; h_proto = eth->h_proto; if (h_proto == htons(ETH_P_IP)) return handle_ipv4(xdp); else if (h_proto == htons(ETH_P_IPV6)) return handle_ipv6(xdp); else return XDP_PASS; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/xdp_tx_iptunnel_kern.c
/* Copyright (c) 2017 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * Sample BPF program to set send and receive buffers to 150KB, sndcwnd clamp * to 100 packets and SYN and SYN_ACK RTOs to 10ms when both hosts are within * the same datacenter. For his example, we assume they are within the same * datacenter when the first 5.5 bytes of their IPv6 addresses are the same. * * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program. */ #include <uapi/linux/bpf.h> #include <uapi/linux/if_ether.h> #include <uapi/linux/if_packet.h> #include <uapi/linux/ip.h> #include <linux/socket.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> #define DEBUG 1 SEC("sockops") int bpf_clamp(struct bpf_sock_ops *skops) { int bufsize = 150000; int to_init = 10; int clamp = 100; int rv = 0; int op; /* For testing purposes, only execute rest of BPF program * if neither port numberis 55601 */ if (bpf_ntohl(skops->remote_port) != 55601 && skops->local_port != 55601) { skops->reply = -1; return 0; } op = (int) skops->op; #ifdef DEBUG bpf_printk("BPF command: %d\n", op); #endif /* Check that both hosts are within same datacenter. For this example * it is the case when the first 5.5 bytes of their IPv6 addresses are * the same. 
*/ if (skops->family == AF_INET6 && skops->local_ip6[0] == skops->remote_ip6[0] && (bpf_ntohl(skops->local_ip6[1]) & 0xfff00000) == (bpf_ntohl(skops->remote_ip6[1]) & 0xfff00000)) { switch (op) { case BPF_SOCK_OPS_TIMEOUT_INIT: rv = to_init; break; case BPF_SOCK_OPS_TCP_CONNECT_CB: /* Set sndbuf and rcvbuf of active connections */ rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF, &bufsize, sizeof(bufsize)); rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF, &bufsize, sizeof(bufsize)); break; case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: rv = bpf_setsockopt(skops, SOL_TCP, TCP_BPF_SNDCWND_CLAMP, &clamp, sizeof(clamp)); break; case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: /* Set sndbuf and rcvbuf of passive connections */ rv = bpf_setsockopt(skops, SOL_TCP, TCP_BPF_SNDCWND_CLAMP, &clamp, sizeof(clamp)); rv += bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF, &bufsize, sizeof(bufsize)); rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF, &bufsize, sizeof(bufsize)); break; default: rv = -1; } } else { rv = -1; } #ifdef DEBUG bpf_printk("Returning %d\n", rv); #endif skops->reply = rv; return 1; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/tcp_clamp_kern.c
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <unistd.h> #include <string.h> #include <assert.h> #include <bpf/libbpf.h> #include <bpf/bpf.h> #include "trace_helpers.h" int main(int ac, char **argv) { struct bpf_object *obj = NULL; struct bpf_link *links[20]; long key, next_key, value; struct bpf_program *prog; int map_fd, i, j = 0; char filename[256]; struct ksym *sym; if (load_kallsyms()) { printf("failed to process /proc/kallsyms\n"); return 2; } snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); obj = NULL; goto cleanup; } /* load BPF program */ if (bpf_object__load(obj)) { fprintf(stderr, "ERROR: loading BPF object file failed\n"); goto cleanup; } map_fd = bpf_object__find_map_fd_by_name(obj, "my_map"); if (map_fd < 0) { fprintf(stderr, "ERROR: finding a map in obj file failed\n"); goto cleanup; } bpf_object__for_each_program(prog, obj) { links[j] = bpf_program__attach(prog); if (libbpf_get_error(links[j])) { fprintf(stderr, "bpf_program__attach failed\n"); links[j] = NULL; goto cleanup; } j++; } for (i = 0; i < 5; i++) { key = 0; printf("kprobing funcs:"); while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) { bpf_map_lookup_elem(map_fd, &next_key, &value); assert(next_key == value); sym = ksym_search(value); key = next_key; if (!sym) { printf("ksym not found. Is kallsyms loaded?\n"); continue; } printf(" %s", sym->name); } if (key) printf("\n"); key = 0; while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) bpf_map_delete_elem(map_fd, &next_key); sleep(1); } cleanup: for (j--; j >= 0; j--) bpf_link__destroy(links[j]); bpf_object__close(obj); return 0; }
linux-master
samples/bpf/spintest_user.c
/* SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
 *
 * Example howto transfer info from XDP to SKB, e.g. skb->mark
 * -----------------------------------------------------------
 * This uses the XDP data_meta infrastructure, and is a cooperation
 * between two bpf-programs (1) XDP and (2) clsact at TC-ingress hook.
 *
 * Notice: This example does not use the BPF C-loader,
 * but instead rely on the iproute2 TC tool for loading BPF-objects.
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/pkt_cls.h>

#include <bpf/bpf_helpers.h>

/*
 * This struct is stored in the XDP 'data_meta' area, which is located
 * just in-front-of the raw packet payload data.  The meaning is
 * specific to these two BPF programs that use it as a communication
 * channel.  XDP adjust/increase the area via a bpf-helper, and TC use
 * boundary checks to see if data have been provided.
 *
 * The struct must be 4 byte aligned, which here is enforced by the
 * struct __attribute__((aligned(4))).
 */
struct meta_info {
	__u32 mark;
} __attribute__((aligned(4)));

SEC("xdp_mark")
int _xdp_mark(struct xdp_md *ctx)
{
	struct meta_info *meta;
	void *data, *data_end;
	int ret;

	/* Reserve space in-front of data pointer for our meta info.
	 * (Notice drivers not supporting data_meta will fail here!)
	 */
	ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta));
	if (ret < 0)
		return XDP_ABORTED;

	/* Notice: Kernel-side verifier requires that loading of
	 * ctx->data MUST happen _after_ helper bpf_xdp_adjust_meta(),
	 * as pkt-data pointers are invalidated.  Helpers that require
	 * this are determined/marked by bpf_helper_changes_pkt_data()
	 */
	data = (void *)(unsigned long)ctx->data;

	/* Check data_meta have room for meta_info struct.  The verifier
	 * needs this explicit bounds check before meta is dereferenced.
	 */
	meta = (void *)(unsigned long)ctx->data_meta;
	if (meta + 1 > data)
		return XDP_ABORTED;

	meta->mark = 42;

	return XDP_PASS;
}

SEC("tc_mark")
int _tc_mark(struct __sk_buff *ctx)
{
	void *data      = (void *)(unsigned long)ctx->data;
	void *data_end  = (void *)(unsigned long)ctx->data_end;
	void *data_meta = (void *)(unsigned long)ctx->data_meta;
	struct meta_info *meta = data_meta;

	/* Check XDP gave us some data_meta; if not, meta == data and the
	 * bounds check below fails.
	 */
	if (meta + 1 > data) {
		ctx->mark = 41;
		 /* Skip "accept" if no data_meta is avail */
		return TC_ACT_OK;
	}

	/* Hint: See func tc_cls_act_is_valid_access() for BPF_WRITE access */
	ctx->mark = meta->mark; /* Transfer XDP-mark to SKB-mark */

	return TC_ACT_OK;
}

/* Manually attaching these programs:
export DEV=ixgbe2
export FILE=xdp2skb_meta_kern.o

# via TC command
tc qdisc del dev $DEV clsact 2> /dev/null
tc qdisc add dev $DEV clsact
tc filter  add dev $DEV ingress prio 1 handle 1 bpf da obj $FILE sec tc_mark
tc filter show dev $DEV ingress

# XDP via IP command:
ip link set dev $DEV xdp off
ip link set dev $DEV xdp obj $FILE sec xdp_mark

# Use iptable to "see" if SKBs are marked
iptables -I INPUT -p icmp -m mark --mark 41  # == 0x29
iptables -I INPUT -p icmp -m mark --mark 42  # == 0x2a

# Hint: catch XDP_ABORTED errors via
perf record -e xdp:*
perf script
*/
linux-master
samples/bpf/xdp2skb_meta_kern.c
/* Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #define KBUILD_MODNAME "foo" #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/in.h> #include <linux/tcp.h> #include <linux/udp.h> #include <uapi/linux/bpf.h> #include <net/ip.h> #include <bpf/bpf_helpers.h> #define DEFAULT_PKTGEN_UDP_PORT 9 #define DEBUG 0 static int tcp(void *data, uint64_t tp_off, void *data_end) { struct tcphdr *tcp = data + tp_off; if (tcp + 1 > data_end) return 0; if (tcp->dest == htons(80) || tcp->source == htons(80)) return TC_ACT_SHOT; return 0; } static int udp(void *data, uint64_t tp_off, void *data_end) { struct udphdr *udp = data + tp_off; if (udp + 1 > data_end) return 0; if (udp->dest == htons(DEFAULT_PKTGEN_UDP_PORT) || udp->source == htons(DEFAULT_PKTGEN_UDP_PORT)) { if (DEBUG) { char fmt[] = "udp port 9 indeed\n"; bpf_trace_printk(fmt, sizeof(fmt)); } return TC_ACT_SHOT; } return 0; } static int parse_ipv4(void *data, uint64_t nh_off, void *data_end) { struct iphdr *iph; uint64_t ihl_len; iph = data + nh_off; if (iph + 1 > data_end) return 0; if (ip_is_fragment(iph)) return 0; ihl_len = iph->ihl * 4; if (iph->protocol == IPPROTO_IPIP) { iph = data + nh_off + ihl_len; if (iph + 1 > data_end) return 0; ihl_len += iph->ihl * 4; } if (iph->protocol == IPPROTO_TCP) return tcp(data, nh_off + ihl_len, data_end); else if (iph->protocol == IPPROTO_UDP) return udp(data, nh_off + ihl_len, data_end); return 0; } static int parse_ipv6(void *data, uint64_t nh_off, void *data_end) { struct ipv6hdr *ip6h; struct iphdr *iph; uint64_t ihl_len = sizeof(struct ipv6hdr); uint64_t nexthdr; ip6h = data + nh_off; if (ip6h + 1 > data_end) return 0; nexthdr = ip6h->nexthdr; if (nexthdr == IPPROTO_IPIP) { iph = data + nh_off + ihl_len; if (iph + 1 > data_end) return 0; 
ihl_len += iph->ihl * 4; nexthdr = iph->protocol; } else if (nexthdr == IPPROTO_IPV6) { ip6h = data + nh_off + ihl_len; if (ip6h + 1 > data_end) return 0; ihl_len += sizeof(struct ipv6hdr); nexthdr = ip6h->nexthdr; } if (nexthdr == IPPROTO_TCP) return tcp(data, nh_off + ihl_len, data_end); else if (nexthdr == IPPROTO_UDP) return udp(data, nh_off + ihl_len, data_end); return 0; } SEC("varlen") int handle_ingress(struct __sk_buff *skb) { void *data = (void *)(long)skb->data; struct ethhdr *eth = data; void *data_end = (void *)(long)skb->data_end; uint64_t h_proto, nh_off; nh_off = sizeof(*eth); if (data + nh_off > data_end) return 0; h_proto = eth->h_proto; if (h_proto == ETH_P_8021Q || h_proto == ETH_P_8021AD) { struct vlan_hdr *vhdr; vhdr = data + nh_off; nh_off += sizeof(struct vlan_hdr); if (data + nh_off > data_end) return 0; h_proto = vhdr->h_vlan_encapsulated_proto; } if (h_proto == ETH_P_8021Q || h_proto == ETH_P_8021AD) { struct vlan_hdr *vhdr; vhdr = data + nh_off; nh_off += sizeof(struct vlan_hdr); if (data + nh_off > data_end) return 0; h_proto = vhdr->h_vlan_encapsulated_proto; } if (h_proto == htons(ETH_P_IP)) return parse_ipv4(data, nh_off, data_end); else if (h_proto == htons(ETH_P_IPV6)) return parse_ipv6(data, nh_off, data_end); return 0; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/parse_varlen.c
/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

/* kfree_skb_reason caller-IP -> hit count */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, long);
	__type(value, long);
	__uint(max_entries, 1024);
} my_map SEC(".maps");

/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
 * example will no longer be meaningful
 */
SEC("kprobe/kfree_skb_reason")
int bpf_prog2(struct pt_regs *ctx)
{
	long loc = 0;
	long init_val = 1;
	long *value;

	/* read ip of kfree_skb_reason caller.
	 * non-portable version of __builtin_return_address(0)
	 */
	BPF_KPROBE_READ_RET_IP(loc, ctx);

	value = bpf_map_lookup_elem(&my_map, &loc);
	if (value)
		/* Fix: increment atomically — this kprobe can fire
		 * concurrently on several CPUs and the map is shared
		 * (matches the idiom already used in bpf_prog3 below).
		 */
		__sync_fetch_and_add(value, 1);
	else
		bpf_map_update_elem(&my_map, &loc, &init_val, BPF_ANY);
	return 0;
}

/* Branch-free integer log2 for 32-bit values. */
static unsigned int log2(unsigned int v)
{
	unsigned int r;
	unsigned int shift;

	r = (v > 0xFFFF) << 4;
	v >>= r;
	shift = (v > 0xFF) << 3;
	v >>= shift;
	r |= shift;
	shift = (v > 0xF) << 2;
	v >>= shift;
	r |= shift;
	shift = (v > 0x3) << 1;
	v >>= shift;
	r |= shift;
	r |= (v >> 1);
	return r;
}

/* log2 for 64-bit values, built on the 32-bit helper. */
static unsigned int log2l(unsigned long v)
{
	unsigned int hi = v >> 32;

	if (hi)
		return log2(hi) + 32;
	else
		return log2(v);
}

/* Histogram bucket identity: who wrote, and the log2 size bucket. */
struct hist_key {
	char comm[16];
	u64 pid_tgid;
	u64 uid_gid;
	u64 index;
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(struct hist_key));
	__uint(value_size, sizeof(long));
	__uint(max_entries, 1024);
} my_hist_map SEC(".maps");

SEC("ksyscall/write")
int BPF_KSYSCALL(bpf_prog3, unsigned int fd, const char *buf, size_t count)
{
	long init_val = 1;
	long *value;
	struct hist_key key;

	key.index = log2l(count);
	key.pid_tgid = bpf_get_current_pid_tgid();
	key.uid_gid = bpf_get_current_uid_gid();
	bpf_get_current_comm(&key.comm, sizeof(key.comm));

	value = bpf_map_lookup_elem(&my_hist_map, &key);
	if (value)
		__sync_fetch_and_add(value, 1);
	else
		bpf_map_update_elem(&my_hist_map, &key, &init_val, BPF_ANY);
	return 0;
}
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/tracex2.bpf.c
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2017 Cavium, Inc. */ #include <linux/bpf.h> #include <linux/netlink.h> #include <linux/rtnetlink.h> #include <assert.h> #include <errno.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/socket.h> #include <unistd.h> #include <bpf/bpf.h> #include <arpa/inet.h> #include <fcntl.h> #include <poll.h> #include <net/if.h> #include <netdb.h> #include <sys/ioctl.h> #include <sys/syscall.h> #include "bpf_util.h" #include <bpf/libbpf.h> #include <libgen.h> #include <getopt.h> #include <pthread.h> #include "xdp_sample_user.h" #include "xdp_router_ipv4.skel.h" static const char *__doc__ = "XDP IPv4 router implementation\n" "Usage: xdp_router_ipv4 <IFNAME-0> ... <IFNAME-N>\n"; static char buf[8192]; static int lpm_map_fd; static int arp_table_map_fd; static int exact_match_map_fd; static int tx_port_map_fd; static bool routes_thread_exit; static int interval = 5; static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI | SAMPLE_EXCEPTION_CNT; DEFINE_SAMPLE_INIT(xdp_router_ipv4); static const struct option long_options[] = { { "help", no_argument, NULL, 'h' }, { "skb-mode", no_argument, NULL, 'S' }, { "force", no_argument, NULL, 'F' }, { "interval", required_argument, NULL, 'i' }, { "verbose", no_argument, NULL, 'v' }, { "stats", no_argument, NULL, 's' }, {} }; static int get_route_table(int rtm_family); static int recv_msg(struct sockaddr_nl sock_addr, int sock) { struct nlmsghdr *nh; int len, nll = 0; char *buf_ptr; buf_ptr = buf; while (1) { len = recv(sock, buf_ptr, sizeof(buf) - nll, 0); if (len < 0) return len; nh = (struct nlmsghdr *)buf_ptr; if (nh->nlmsg_type == NLMSG_DONE) break; buf_ptr += len; nll += len; if ((sock_addr.nl_groups & RTMGRP_NEIGH) == RTMGRP_NEIGH) break; if ((sock_addr.nl_groups & RTMGRP_IPV4_ROUTE) == RTMGRP_IPV4_ROUTE) break; } return nll; } /* Function to parse the route entry returned by netlink * Updates the 
route entry related map entries */ static void read_route(struct nlmsghdr *nh, int nll) { char dsts[24], gws[24], ifs[16], dsts_len[24], metrics[24]; struct bpf_lpm_trie_key *prefix_key; struct rtattr *rt_attr; struct rtmsg *rt_msg; int rtm_family; int rtl; int i; struct route_table { int dst_len, iface, metric; __be32 dst, gw; __be64 mac; } route; struct arp_table { __be64 mac; __be32 dst; }; struct direct_map { struct arp_table arp; int ifindex; __be64 mac; } direct_entry; memset(&route, 0, sizeof(route)); for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) { rt_msg = (struct rtmsg *)NLMSG_DATA(nh); rtm_family = rt_msg->rtm_family; if (rtm_family == AF_INET) if (rt_msg->rtm_table != RT_TABLE_MAIN) continue; rt_attr = (struct rtattr *)RTM_RTA(rt_msg); rtl = RTM_PAYLOAD(nh); for (; RTA_OK(rt_attr, rtl); rt_attr = RTA_NEXT(rt_attr, rtl)) { switch (rt_attr->rta_type) { case NDA_DST: sprintf(dsts, "%u", (*((__be32 *)RTA_DATA(rt_attr)))); break; case RTA_GATEWAY: sprintf(gws, "%u", *((__be32 *)RTA_DATA(rt_attr))); break; case RTA_OIF: sprintf(ifs, "%u", *((int *)RTA_DATA(rt_attr))); break; case RTA_METRICS: sprintf(metrics, "%u", *((int *)RTA_DATA(rt_attr))); default: break; } } sprintf(dsts_len, "%d", rt_msg->rtm_dst_len); route.dst = atoi(dsts); route.dst_len = atoi(dsts_len); route.gw = atoi(gws); route.iface = atoi(ifs); route.metric = atoi(metrics); assert(get_mac_addr(route.iface, &route.mac) == 0); assert(bpf_map_update_elem(tx_port_map_fd, &route.iface, &route.iface, 0) == 0); if (rtm_family == AF_INET) { struct trie_value { __u8 prefix[4]; __be64 value; int ifindex; int metric; __be32 gw; } *prefix_value; prefix_key = alloca(sizeof(*prefix_key) + 4); prefix_value = alloca(sizeof(*prefix_value)); prefix_key->prefixlen = 32; prefix_key->prefixlen = route.dst_len; direct_entry.mac = route.mac & 0xffffffffffff; direct_entry.ifindex = route.iface; direct_entry.arp.mac = 0; direct_entry.arp.dst = 0; if (route.dst_len == 32) { if (nh->nlmsg_type == RTM_DELROUTE) { 
assert(bpf_map_delete_elem(exact_match_map_fd, &route.dst) == 0); } else { if (bpf_map_lookup_elem(arp_table_map_fd, &route.dst, &direct_entry.arp.mac) == 0) direct_entry.arp.dst = route.dst; assert(bpf_map_update_elem(exact_match_map_fd, &route.dst, &direct_entry, 0) == 0); } } for (i = 0; i < 4; i++) prefix_key->data[i] = (route.dst >> i * 8) & 0xff; if (bpf_map_lookup_elem(lpm_map_fd, prefix_key, prefix_value) < 0) { for (i = 0; i < 4; i++) prefix_value->prefix[i] = prefix_key->data[i]; prefix_value->value = route.mac & 0xffffffffffff; prefix_value->ifindex = route.iface; prefix_value->gw = route.gw; prefix_value->metric = route.metric; assert(bpf_map_update_elem(lpm_map_fd, prefix_key, prefix_value, 0 ) == 0); } else { if (nh->nlmsg_type == RTM_DELROUTE) { assert(bpf_map_delete_elem(lpm_map_fd, prefix_key ) == 0); /* Rereading the route table to check if * there is an entry with the same * prefix but a different metric as the * deleted entry. */ get_route_table(AF_INET); } else if (prefix_key->data[0] == prefix_value->prefix[0] && prefix_key->data[1] == prefix_value->prefix[1] && prefix_key->data[2] == prefix_value->prefix[2] && prefix_key->data[3] == prefix_value->prefix[3] && route.metric >= prefix_value->metric) { continue; } else { for (i = 0; i < 4; i++) prefix_value->prefix[i] = prefix_key->data[i]; prefix_value->value = route.mac & 0xffffffffffff; prefix_value->ifindex = route.iface; prefix_value->gw = route.gw; prefix_value->metric = route.metric; assert(bpf_map_update_elem(lpm_map_fd, prefix_key, prefix_value, 0) == 0); } } } memset(&route, 0, sizeof(route)); memset(dsts, 0, sizeof(dsts)); memset(dsts_len, 0, sizeof(dsts_len)); memset(gws, 0, sizeof(gws)); memset(ifs, 0, sizeof(ifs)); memset(&route, 0, sizeof(route)); } } /* Function to read the existing route table when the process is launched*/ static int get_route_table(int rtm_family) { struct sockaddr_nl sa; struct nlmsghdr *nh; int sock, seq = 0; struct msghdr msg; struct iovec iov; int ret = 0; 
int nll; struct { struct nlmsghdr nl; struct rtmsg rt; char buf[8192]; } req; sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); if (sock < 0) { fprintf(stderr, "open netlink socket: %s\n", strerror(errno)); return -errno; } memset(&sa, 0, sizeof(sa)); sa.nl_family = AF_NETLINK; if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) { fprintf(stderr, "bind netlink socket: %s\n", strerror(errno)); ret = -errno; goto cleanup; } memset(&req, 0, sizeof(req)); req.nl.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)); req.nl.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP; req.nl.nlmsg_type = RTM_GETROUTE; req.rt.rtm_family = rtm_family; req.rt.rtm_table = RT_TABLE_MAIN; req.nl.nlmsg_pid = 0; req.nl.nlmsg_seq = ++seq; memset(&msg, 0, sizeof(msg)); iov.iov_base = (void *)&req.nl; iov.iov_len = req.nl.nlmsg_len; msg.msg_iov = &iov; msg.msg_iovlen = 1; ret = sendmsg(sock, &msg, 0); if (ret < 0) { fprintf(stderr, "send to netlink: %s\n", strerror(errno)); ret = -errno; goto cleanup; } memset(buf, 0, sizeof(buf)); nll = recv_msg(sa, sock); if (nll < 0) { fprintf(stderr, "recv from netlink: %s\n", strerror(nll)); ret = nll; goto cleanup; } nh = (struct nlmsghdr *)buf; read_route(nh, nll); cleanup: close(sock); return ret; } /* Function to parse the arp entry returned by netlink * Updates the arp entry related map entries */ static void read_arp(struct nlmsghdr *nh, int nll) { struct rtattr *rt_attr; char dsts[24], mac[24]; struct ndmsg *rt_msg; int rtl, ndm_family; struct arp_table { __be64 mac; __be32 dst; } arp_entry; struct direct_map { struct arp_table arp; int ifindex; __be64 mac; } direct_entry; for (; NLMSG_OK(nh, nll); nh = NLMSG_NEXT(nh, nll)) { rt_msg = (struct ndmsg *)NLMSG_DATA(nh); rt_attr = (struct rtattr *)RTM_RTA(rt_msg); ndm_family = rt_msg->ndm_family; rtl = RTM_PAYLOAD(nh); for (; RTA_OK(rt_attr, rtl); rt_attr = RTA_NEXT(rt_attr, rtl)) { switch (rt_attr->rta_type) { case NDA_DST: sprintf(dsts, "%u", *((__be32 *)RTA_DATA(rt_attr))); break; case NDA_LLADDR: 
sprintf(mac, "%lld", *((__be64 *)RTA_DATA(rt_attr))); break; default: break; } } arp_entry.dst = atoi(dsts); arp_entry.mac = atol(mac); if (ndm_family == AF_INET) { if (bpf_map_lookup_elem(exact_match_map_fd, &arp_entry.dst, &direct_entry) == 0) { if (nh->nlmsg_type == RTM_DELNEIGH) { direct_entry.arp.dst = 0; direct_entry.arp.mac = 0; } else if (nh->nlmsg_type == RTM_NEWNEIGH) { direct_entry.arp.dst = arp_entry.dst; direct_entry.arp.mac = arp_entry.mac; } assert(bpf_map_update_elem(exact_match_map_fd, &arp_entry.dst, &direct_entry, 0 ) == 0); memset(&direct_entry, 0, sizeof(direct_entry)); } if (nh->nlmsg_type == RTM_DELNEIGH) { assert(bpf_map_delete_elem(arp_table_map_fd, &arp_entry.dst) == 0); } else if (nh->nlmsg_type == RTM_NEWNEIGH) { assert(bpf_map_update_elem(arp_table_map_fd, &arp_entry.dst, &arp_entry.mac, 0 ) == 0); } } memset(&arp_entry, 0, sizeof(arp_entry)); memset(dsts, 0, sizeof(dsts)); } } /* Function to read the existing arp table when the process is launched*/ static int get_arp_table(int rtm_family) { struct sockaddr_nl sa; struct nlmsghdr *nh; int sock, seq = 0; struct msghdr msg; struct iovec iov; int ret = 0; int nll; struct { struct nlmsghdr nl; struct ndmsg rt; char buf[8192]; } req; sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); if (sock < 0) { fprintf(stderr, "open netlink socket: %s\n", strerror(errno)); return -errno; } memset(&sa, 0, sizeof(sa)); sa.nl_family = AF_NETLINK; if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) { fprintf(stderr, "bind netlink socket: %s\n", strerror(errno)); ret = -errno; goto cleanup; } memset(&req, 0, sizeof(req)); req.nl.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)); req.nl.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP; req.nl.nlmsg_type = RTM_GETNEIGH; req.rt.ndm_state = NUD_REACHABLE; req.rt.ndm_family = rtm_family; req.nl.nlmsg_pid = 0; req.nl.nlmsg_seq = ++seq; memset(&msg, 0, sizeof(msg)); iov.iov_base = (void *)&req.nl; iov.iov_len = req.nl.nlmsg_len; msg.msg_iov = &iov; msg.msg_iovlen = 1; 
ret = sendmsg(sock, &msg, 0); if (ret < 0) { fprintf(stderr, "send to netlink: %s\n", strerror(errno)); ret = -errno; goto cleanup; } memset(buf, 0, sizeof(buf)); nll = recv_msg(sa, sock); if (nll < 0) { fprintf(stderr, "recv from netlink: %s\n", strerror(nll)); ret = nll; goto cleanup; } nh = (struct nlmsghdr *)buf; read_arp(nh, nll); cleanup: close(sock); return ret; } /* Function to keep track and update changes in route and arp table * Give regular statistics of packets forwarded */ static void *monitor_routes_thread(void *arg) { struct pollfd fds_route, fds_arp; struct sockaddr_nl la, lr; int sock, sock_arp, nll; struct nlmsghdr *nh; sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); if (sock < 0) { fprintf(stderr, "open netlink socket: %s\n", strerror(errno)); return NULL; } fcntl(sock, F_SETFL, O_NONBLOCK); memset(&lr, 0, sizeof(lr)); lr.nl_family = AF_NETLINK; lr.nl_groups = RTMGRP_IPV6_ROUTE | RTMGRP_IPV4_ROUTE | RTMGRP_NOTIFY; if (bind(sock, (struct sockaddr *)&lr, sizeof(lr)) < 0) { fprintf(stderr, "bind netlink socket: %s\n", strerror(errno)); close(sock); return NULL; } fds_route.fd = sock; fds_route.events = POLL_IN; sock_arp = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); if (sock_arp < 0) { fprintf(stderr, "open netlink socket: %s\n", strerror(errno)); close(sock); return NULL; } fcntl(sock_arp, F_SETFL, O_NONBLOCK); memset(&la, 0, sizeof(la)); la.nl_family = AF_NETLINK; la.nl_groups = RTMGRP_NEIGH | RTMGRP_NOTIFY; if (bind(sock_arp, (struct sockaddr *)&la, sizeof(la)) < 0) { fprintf(stderr, "bind netlink socket: %s\n", strerror(errno)); goto cleanup; } fds_arp.fd = sock_arp; fds_arp.events = POLL_IN; /* dump route and arp tables */ if (get_arp_table(AF_INET) < 0) { fprintf(stderr, "Failed reading arp table\n"); goto cleanup; } if (get_route_table(AF_INET) < 0) { fprintf(stderr, "Failed reading route table\n"); goto cleanup; } while (!routes_thread_exit) { memset(buf, 0, sizeof(buf)); if (poll(&fds_route, 1, 3) == POLL_IN) { nll = recv_msg(lr, 
sock); if (nll < 0) { fprintf(stderr, "recv from netlink: %s\n", strerror(nll)); goto cleanup; } nh = (struct nlmsghdr *)buf; read_route(nh, nll); } memset(buf, 0, sizeof(buf)); if (poll(&fds_arp, 1, 3) == POLL_IN) { nll = recv_msg(la, sock_arp); if (nll < 0) { fprintf(stderr, "recv from netlink: %s\n", strerror(nll)); goto cleanup; } nh = (struct nlmsghdr *)buf; read_arp(nh, nll); } sleep(interval); } cleanup: close(sock_arp); close(sock); return NULL; } static void usage(char *argv[], const struct option *long_options, const char *doc, int mask, bool error, struct bpf_object *obj) { sample_usage(argv, long_options, doc, mask, error); } int main(int argc, char **argv) { bool error = true, generic = false, force = false; int opt, ret = EXIT_FAIL_BPF; struct xdp_router_ipv4 *skel; int i, total_ifindex = argc - 1; char **ifname_list = argv + 1; pthread_t routes_thread; int longindex = 0; if (libbpf_set_strict_mode(LIBBPF_STRICT_ALL) < 0) { fprintf(stderr, "Failed to set libbpf strict mode: %s\n", strerror(errno)); goto end; } skel = xdp_router_ipv4__open(); if (!skel) { fprintf(stderr, "Failed to xdp_router_ipv4__open: %s\n", strerror(errno)); goto end; } ret = sample_init_pre_load(skel); if (ret < 0) { fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret)); ret = EXIT_FAIL_BPF; goto end_destroy; } ret = xdp_router_ipv4__load(skel); if (ret < 0) { fprintf(stderr, "Failed to xdp_router_ipv4__load: %s\n", strerror(errno)); goto end_destroy; } ret = sample_init(skel, mask); if (ret < 0) { fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_destroy; } while ((opt = getopt_long(argc, argv, "si:SFvh", long_options, &longindex)) != -1) { switch (opt) { case 's': mask |= SAMPLE_REDIRECT_MAP_CNT; total_ifindex--; ifname_list++; break; case 'i': interval = strtoul(optarg, NULL, 0); total_ifindex -= 2; ifname_list += 2; break; case 'S': generic = true; total_ifindex--; ifname_list++; break; case 'F': force = true; 
total_ifindex--; ifname_list++; break; case 'v': sample_switch_mode(); total_ifindex--; ifname_list++; break; case 'h': error = false; default: usage(argv, long_options, __doc__, mask, error, skel->obj); goto end_destroy; } } ret = EXIT_FAIL_OPTION; if (optind == argc) { usage(argv, long_options, __doc__, mask, true, skel->obj); goto end_destroy; } lpm_map_fd = bpf_map__fd(skel->maps.lpm_map); if (lpm_map_fd < 0) { fprintf(stderr, "Failed loading lpm_map %s\n", strerror(-lpm_map_fd)); goto end_destroy; } arp_table_map_fd = bpf_map__fd(skel->maps.arp_table); if (arp_table_map_fd < 0) { fprintf(stderr, "Failed loading arp_table_map_fd %s\n", strerror(-arp_table_map_fd)); goto end_destroy; } exact_match_map_fd = bpf_map__fd(skel->maps.exact_match); if (exact_match_map_fd < 0) { fprintf(stderr, "Failed loading exact_match_map_fd %s\n", strerror(-exact_match_map_fd)); goto end_destroy; } tx_port_map_fd = bpf_map__fd(skel->maps.tx_port); if (tx_port_map_fd < 0) { fprintf(stderr, "Failed loading tx_port_map_fd %s\n", strerror(-tx_port_map_fd)); goto end_destroy; } ret = EXIT_FAIL_XDP; for (i = 0; i < total_ifindex; i++) { int index = if_nametoindex(ifname_list[i]); if (!index) { fprintf(stderr, "Interface %s not found %s\n", ifname_list[i], strerror(-tx_port_map_fd)); goto end_destroy; } if (sample_install_xdp(skel->progs.xdp_router_ipv4_prog, index, generic, force) < 0) goto end_destroy; } ret = pthread_create(&routes_thread, NULL, monitor_routes_thread, NULL); if (ret) { fprintf(stderr, "Failed creating routes_thread: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_destroy; } ret = sample_run(interval, NULL, NULL); routes_thread_exit = true; if (ret < 0) { fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret)); ret = EXIT_FAIL; goto end_thread_wait; } ret = EXIT_OK; end_thread_wait: pthread_join(routes_thread, NULL); end_destroy: xdp_router_ipv4__destroy(skel); end: sample_exit(ret); }
linux-master
samples/bpf/xdp_router_ipv4_user.c
/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/in.h>
#include <uapi/linux/if.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/ipv6.h>
#include <uapi/linux/if_tunnel.h>
#include <uapi/linux/mpls.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"
#define IP_MF		0x2000
#define IP_OFFSET	0x1FFF

/* Tail-call slots in prog_array_init */
#define PARSE_VLAN 1
#define PARSE_MPLS 2
#define PARSE_IP 3
#define PARSE_IPV6 4

struct vlan_hdr {
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

struct flow_key_record {
	__be32 src;
	__be32 dst;
	union {
		__be32 ports;
		__be16 port16[2];
	};
	__u32 ip_proto;
};

static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto);

static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
{
	return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
		& (IP_MF | IP_OFFSET);
}

/* Fold the 16-byte IPv6 address at 'off' into a 32-bit hash */
static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
{
	__u64 w0 = load_word(ctx, off);
	__u64 w1 = load_word(ctx, off + 4);
	__u64 w2 = load_word(ctx, off + 8);
	__u64 w3 = load_word(ctx, off + 12);

	return (__u32)(w0 ^ w1 ^ w2 ^ w3);
}

struct globals {
	struct flow_key_record flow;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, __u32);
	__type(value, struct globals);
	__uint(max_entries, 32);
} percpu_map SEC(".maps");

/* user poor man's per_cpu until native support is ready */
static struct globals *this_cpu_globals(void)
{
	u32 key = bpf_get_smp_processor_id();

	return bpf_map_lookup_elem(&percpu_map, &key);
}

/* some simple stats for user space consumption */
struct pair {
	__u64 packets;
	__u64 bytes;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, struct flow_key_record);
	__type(value, struct pair);
	__uint(max_entries, 1024);
} hash_map SEC(".maps");

static void update_stats(struct __sk_buff *skb, struct globals *g)
{
	struct flow_key_record key = g->flow;
	struct pair *value;

	value = bpf_map_lookup_elem(&hash_map, &key);
	if (value) {
		__sync_fetch_and_add(&value->packets, 1);
		__sync_fetch_and_add(&value->bytes, skb->len);
	} else {
		struct pair val = {1, skb->len};

		bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
	}
}

static __always_inline void parse_ip_proto(struct __sk_buff *skb,
					   struct globals *g, __u32 ip_proto)
{
	__u32 nhoff = skb->cb[0];
	int poff;

	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		};

		__u32 gre_flags = load_half(skb,
					    nhoff + offsetof(struct gre_hdr, flags));
		__u32 gre_proto = load_half(skb,
					    nhoff + offsetof(struct gre_hdr, proto));

		if (gre_flags & (GRE_VERSION|GRE_ROUTING))
			break;

		nhoff += 4;
		if (gre_flags & GRE_CSUM)
			nhoff += 4;
		if (gre_flags & GRE_KEY)
			nhoff += 4;
		if (gre_flags & GRE_SEQ)
			nhoff += 4;

		skb->cb[0] = nhoff;
		parse_eth_proto(skb, gre_proto);
		break;
	}
	case IPPROTO_IPIP:
		parse_eth_proto(skb, ETH_P_IP);
		break;
	case IPPROTO_IPV6:
		parse_eth_proto(skb, ETH_P_IPV6);
		break;
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		g->flow.ports = load_word(skb, nhoff);
		/* fallthrough: also record proto and update stats */
	case IPPROTO_ICMP:
		g->flow.ip_proto = ip_proto;
		update_stats(skb, g);
		break;
	default:
		break;
	}
}

SEC("socket")
int bpf_func_ip(struct __sk_buff *skb)
{
	struct globals *g = this_cpu_globals();
	__u32 nhoff, verlen, ip_proto;

	if (!g)
		return 0;

	nhoff = skb->cb[0];

	if (unlikely(ip_is_fragment(skb, nhoff)))
		return 0;

	ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));

	if (ip_proto != IPPROTO_GRE) {
		g->flow.src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
		g->flow.dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
	}

	verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
	nhoff += (verlen & 0xF) << 2;

	skb->cb[0] = nhoff;
	parse_ip_proto(skb, g, ip_proto);
	return 0;
}

SEC("socket")
int bpf_func_ipv6(struct __sk_buff *skb)
{
	struct globals *g = this_cpu_globals();
	__u32 nhoff, ip_proto;

	if (!g)
		return 0;

	nhoff = skb->cb[0];

	ip_proto = load_byte(skb,
			     nhoff + offsetof(struct ipv6hdr, nexthdr));
	g->flow.src = ipv6_addr_hash(skb,
				     nhoff + offsetof(struct ipv6hdr, saddr));
	g->flow.dst = ipv6_addr_hash(skb,
				     nhoff + offsetof(struct ipv6hdr, daddr));
	nhoff += sizeof(struct ipv6hdr);

	skb->cb[0] = nhoff;
	parse_ip_proto(skb, g, ip_proto);
	return 0;
}

SEC("socket")
int bpf_func_vlan(struct __sk_buff *skb)
{
	__u32 nhoff, proto;

	nhoff = skb->cb[0];

	proto = load_half(skb, nhoff +
			  offsetof(struct vlan_hdr, h_vlan_encapsulated_proto));
	nhoff += sizeof(struct vlan_hdr);
	skb->cb[0] = nhoff;

	parse_eth_proto(skb, proto);

	return 0;
}

SEC("socket")
int bpf_func_mpls(struct __sk_buff *skb)
{
	__u32 nhoff, label;

	nhoff = skb->cb[0];

	label = load_word(skb, nhoff);
	nhoff += sizeof(struct mpls_label);
	skb->cb[0] = nhoff;

	if (label & MPLS_LS_S_MASK) {
		/* bottom of stack: peek at the IP version nibble to pick
		 * the inner parser.
		 * Fix: (verlen & 0xF0) yields 0x40 for IPv4 (or 0x60 for
		 * IPv6), so comparing against 4 could never match and
		 * IPv4-over-MPLS was always mis-dispatched to IPv6.
		 */
		__u8 verlen = load_byte(skb, nhoff);

		if ((verlen & 0xF0) == 0x40)
			parse_eth_proto(skb, ETH_P_IP);
		else
			parse_eth_proto(skb, ETH_P_IPV6);
	} else {
		parse_eth_proto(skb, ETH_P_MPLS_UC);
	}

	return 0;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(max_entries, 8);
	__array(values, u32 (void *));
} prog_array_init SEC(".maps") = {
	.values = {
		[PARSE_VLAN] = (void *)&bpf_func_vlan,
		[PARSE_IP]   = (void *)&bpf_func_ip,
		[PARSE_IPV6] = (void *)&bpf_func_ipv6,
		[PARSE_MPLS] = (void *)&bpf_func_mpls,
	},
};

/* Protocol dispatch routine. It tail-calls next BPF program depending
 * on eth proto. Note, we could have used ...
 *
 * bpf_tail_call(skb, &prog_array_init, proto);
 *
 * ... but it would need large prog_array and cannot be optimised given
 * the map key is not static.
 */
static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto)
{
	switch (proto) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
		bpf_tail_call(skb, &prog_array_init, PARSE_VLAN);
		break;
	case ETH_P_MPLS_UC:
	case ETH_P_MPLS_MC:
		bpf_tail_call(skb, &prog_array_init, PARSE_MPLS);
		break;
	case ETH_P_IP:
		bpf_tail_call(skb, &prog_array_init, PARSE_IP);
		break;
	case ETH_P_IPV6:
		bpf_tail_call(skb, &prog_array_init, PARSE_IPV6);
		break;
	}
}

SEC("socket")
int main_prog(struct __sk_buff *skb)
{
	__u32 nhoff = ETH_HLEN;
	__u32 proto = load_half(skb, 12);

	skb->cb[0] = nhoff;
	parse_eth_proto(skb, proto);
	return 0;
}

char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/sockex3_kern.c
// SPDX-License-Identifier: GPL-2.0 #define _GNU_SOURCE #include <errno.h> #include <stdio.h> #include <stdlib.h> #include <signal.h> #include <sched.h> #include <string.h> #include <unistd.h> #include <fcntl.h> #include <locale.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/time.h> #include <sys/wait.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> static int cstate_map_fd, pstate_map_fd; #define MAX_CPU 8 #define MAX_PSTATE_ENTRIES 5 #define MAX_CSTATE_ENTRIES 3 #define MAX_STARS 40 #define CPUFREQ_MAX_SYSFS_PATH "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq" #define CPUFREQ_LOWEST_FREQ "208000" #define CPUFREQ_HIGHEST_FREQ "12000000" struct cpu_stat_data { unsigned long cstate[MAX_CSTATE_ENTRIES]; unsigned long pstate[MAX_PSTATE_ENTRIES]; }; static struct cpu_stat_data stat_data[MAX_CPU]; static void cpu_stat_print(void) { int i, j; char state_str[sizeof("cstate-9")]; struct cpu_stat_data *data; /* Clear screen */ printf("\033[2J"); /* Header */ printf("\nCPU states statistics:\n"); printf("%-10s ", "state(ms)"); for (i = 0; i < MAX_CSTATE_ENTRIES; i++) { sprintf(state_str, "cstate-%d", i); printf("%-11s ", state_str); } for (i = 0; i < MAX_PSTATE_ENTRIES; i++) { sprintf(state_str, "pstate-%d", i); printf("%-11s ", state_str); } printf("\n"); for (j = 0; j < MAX_CPU; j++) { data = &stat_data[j]; printf("CPU-%-6d ", j); for (i = 0; i < MAX_CSTATE_ENTRIES; i++) printf("%-11ld ", data->cstate[i] / 1000000); for (i = 0; i < MAX_PSTATE_ENTRIES; i++) printf("%-11ld ", data->pstate[i] / 1000000); printf("\n"); } } static void cpu_stat_update(int cstate_fd, int pstate_fd) { unsigned long key, value; int c, i; for (c = 0; c < MAX_CPU; c++) { for (i = 0; i < MAX_CSTATE_ENTRIES; i++) { key = c * MAX_CSTATE_ENTRIES + i; bpf_map_lookup_elem(cstate_fd, &key, &value); stat_data[c].cstate[i] = value; } for (i = 0; i < MAX_PSTATE_ENTRIES; i++) { key = c * MAX_PSTATE_ENTRIES + i; bpf_map_lookup_elem(pstate_fd, &key, &value); stat_data[c].pstate[i] = value; 
} } } /* * This function is copied from 'idlestat' tool function * idlestat_wake_all() in idlestate.c. * * It sets the self running task affinity to cpus one by one so can wake up * the specific CPU to handle scheduling; this results in all cpus can be * waken up once and produce ftrace event 'trace_cpu_idle'. */ static int cpu_stat_inject_cpu_idle_event(void) { int rcpu, i, ret; cpu_set_t cpumask; cpu_set_t original_cpumask; ret = sysconf(_SC_NPROCESSORS_CONF); if (ret < 0) return -1; rcpu = sched_getcpu(); if (rcpu < 0) return -1; /* Keep track of the CPUs we will run on */ sched_getaffinity(0, sizeof(original_cpumask), &original_cpumask); for (i = 0; i < ret; i++) { /* Pointless to wake up ourself */ if (i == rcpu) continue; /* Pointless to wake CPUs we will not run on */ if (!CPU_ISSET(i, &original_cpumask)) continue; CPU_ZERO(&cpumask); CPU_SET(i, &cpumask); sched_setaffinity(0, sizeof(cpumask), &cpumask); } /* Enable all the CPUs of the original mask */ sched_setaffinity(0, sizeof(original_cpumask), &original_cpumask); return 0; } /* * It's possible to have no any frequency change for long time and cannot * get ftrace event 'trace_cpu_frequency' for long period, this introduces * big deviation for pstate statistics. * * To solve this issue, below code forces to set 'scaling_max_freq' to 208MHz * for triggering ftrace event 'trace_cpu_frequency' and then recovery back to * the maximum frequency value 1.2GHz. 
*/ static int cpu_stat_inject_cpu_frequency_event(void) { int len, fd; fd = open(CPUFREQ_MAX_SYSFS_PATH, O_WRONLY); if (fd < 0) { printf("failed to open scaling_max_freq, errno=%d\n", errno); return fd; } len = write(fd, CPUFREQ_LOWEST_FREQ, strlen(CPUFREQ_LOWEST_FREQ)); if (len < 0) { printf("failed to open scaling_max_freq, errno=%d\n", errno); goto err; } len = write(fd, CPUFREQ_HIGHEST_FREQ, strlen(CPUFREQ_HIGHEST_FREQ)); if (len < 0) { printf("failed to open scaling_max_freq, errno=%d\n", errno); goto err; } err: close(fd); return len; } static void int_exit(int sig) { cpu_stat_inject_cpu_idle_event(); cpu_stat_inject_cpu_frequency_event(); cpu_stat_update(cstate_map_fd, pstate_map_fd); cpu_stat_print(); exit(0); } int main(int argc, char **argv) { struct bpf_link *link = NULL; struct bpf_program *prog; struct bpf_object *obj; char filename[256]; int ret; snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); return 0; } prog = bpf_object__find_program_by_name(obj, "bpf_prog1"); if (!prog) { printf("finding a prog in obj file failed\n"); goto cleanup; } /* load BPF program */ if (bpf_object__load(obj)) { fprintf(stderr, "ERROR: loading BPF object file failed\n"); goto cleanup; } cstate_map_fd = bpf_object__find_map_fd_by_name(obj, "cstate_duration"); pstate_map_fd = bpf_object__find_map_fd_by_name(obj, "pstate_duration"); if (cstate_map_fd < 0 || pstate_map_fd < 0) { fprintf(stderr, "ERROR: finding a map in obj file failed\n"); goto cleanup; } link = bpf_program__attach(prog); if (libbpf_get_error(link)) { fprintf(stderr, "ERROR: bpf_program__attach failed\n"); link = NULL; goto cleanup; } ret = cpu_stat_inject_cpu_idle_event(); if (ret < 0) return 1; ret = cpu_stat_inject_cpu_frequency_event(); if (ret < 0) return 1; signal(SIGINT, int_exit); signal(SIGTERM, int_exit); while (1) { cpu_stat_update(cstate_map_fd, 
pstate_map_fd); cpu_stat_print(); sleep(5); } cleanup: bpf_link__destroy(link); bpf_object__close(obj); return 0; }
linux-master
samples/bpf/cpustat_user.c
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <linux/filter.h> #include <linux/seccomp.h> #include <sys/prctl.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #include "trace_helpers.h" #include "bpf_util.h" #ifdef __mips__ #define MAX_ENTRIES 6000 /* MIPS n64 syscalls start at 5000 */ #else #define MAX_ENTRIES 1024 #endif /* install fake seccomp program to enable seccomp code path inside the kernel, * so that our kprobe attached to seccomp_phase1() can be triggered */ static void install_accept_all_seccomp(void) { struct sock_filter filter[] = { BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; if (prctl(PR_SET_SECCOMP, 2, &prog)) perror("prctl"); } int main(int ac, char **argv) { struct bpf_link *link = NULL; struct bpf_program *prog; struct bpf_object *obj; int key, fd, progs_fd; const char *section; char filename[256]; FILE *f; snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); return 0; } prog = bpf_object__find_program_by_name(obj, "bpf_prog1"); if (!prog) { printf("finding a prog in obj file failed\n"); goto cleanup; } /* load BPF program */ if (bpf_object__load(obj)) { fprintf(stderr, "ERROR: loading BPF object file failed\n"); goto cleanup; } link = bpf_program__attach(prog); if (libbpf_get_error(link)) { fprintf(stderr, "ERROR: bpf_program__attach failed\n"); link = NULL; goto cleanup; } progs_fd = bpf_object__find_map_fd_by_name(obj, "progs"); if (progs_fd < 0) { fprintf(stderr, "ERROR: finding a map in obj file failed\n"); goto cleanup; } bpf_object__for_each_program(prog, obj) { section = bpf_program__section_name(prog); /* register only syscalls to PROG_ARRAY */ if (sscanf(section, "kprobe/%d", &key) != 1) continue; fd = bpf_program__fd(prog); 
bpf_map_update_elem(progs_fd, &key, &fd, BPF_ANY); } install_accept_all_seccomp(); f = popen("dd if=/dev/zero of=/dev/null count=5", "r"); (void) f; read_trace_pipe(); cleanup: bpf_link__destroy(link); bpf_object__close(obj); return 0; }
linux-master
samples/bpf/tracex5_user.c
/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #include "vmlinux.h" #include "syscall_nrs.h" #include <linux/version.h> #include <uapi/linux/unistd.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> #define __stringify(x) #x #define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); __uint(key_size, sizeof(u32)); __uint(value_size, sizeof(u32)); #ifdef __mips__ __uint(max_entries, 6000); /* MIPS n64 syscalls start at 5000 */ #else __uint(max_entries, 1024); #endif } progs SEC(".maps"); SEC("kprobe/__seccomp_filter") int bpf_prog1(struct pt_regs *ctx) { int sc_nr = (int)PT_REGS_PARM1(ctx); /* dispatch into next BPF program depending on syscall number */ bpf_tail_call(ctx, &progs, sc_nr); /* fall through -> unknown syscall */ if (sc_nr >= __NR_getuid && sc_nr <= __NR_getsid) { char fmt[] = "syscall=%d (one of get/set uid/pid/gid)\n"; bpf_trace_printk(fmt, sizeof(fmt), sc_nr); } return 0; } /* we jump here when syscall number == __NR_write */ PROG(SYS__NR_write)(struct pt_regs *ctx) { struct seccomp_data sd; bpf_core_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx)); if (sd.args[2] == 512) { char fmt[] = "write(fd=%d, buf=%p, size=%d)\n"; bpf_trace_printk(fmt, sizeof(fmt), sd.args[0], sd.args[1], sd.args[2]); } return 0; } PROG(SYS__NR_read)(struct pt_regs *ctx) { struct seccomp_data sd; bpf_core_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx)); if (sd.args[2] > 128 && sd.args[2] <= 1024) { char fmt[] = "read(fd=%d, buf=%p, size=%d)\n"; bpf_trace_printk(fmt, sizeof(fmt), sd.args[0], sd.args[1], sd.args[2]); } return 0; } #ifdef __NR_mmap2 PROG(SYS__NR_mmap2)(struct pt_regs *ctx) { char fmt[] = "mmap2\n"; bpf_trace_printk(fmt, sizeof(fmt)); return 0; } #endif #ifdef __NR_mmap 
PROG(SYS__NR_mmap)(struct pt_regs *ctx) { char fmt[] = "mmap\n"; bpf_trace_printk(fmt, sizeof(fmt)); return 0; } #endif char _license[] SEC("license") = "GPL"; u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/tracex5.bpf.c
/* Copyright (c) 2017 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * BPF program to set congestion control to dctcp when both hosts are * in the same datacenter (as deteremined by IPv6 prefix). * * Use "bpftool cgroup attach $cg sock_ops $prog" to load this BPF program. */ #include <uapi/linux/bpf.h> #include <uapi/linux/tcp.h> #include <uapi/linux/if_ether.h> #include <uapi/linux/if_packet.h> #include <uapi/linux/ip.h> #include <linux/socket.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> #define DEBUG 1 SEC("sockops") int bpf_cong(struct bpf_sock_ops *skops) { char cong[] = "dctcp"; int rv = 0; int op; /* For testing purposes, only execute rest of BPF program * if neither port numberis 55601 */ if (bpf_ntohl(skops->remote_port) != 55601 && skops->local_port != 55601) { skops->reply = -1; return 1; } op = (int) skops->op; #ifdef DEBUG bpf_printk("BPF command: %d\n", op); #endif /* Check if both hosts are in the same datacenter. For this * example they are if the 1st 5.5 bytes in the IPv6 address * are the same. */ if (skops->family == AF_INET6 && skops->local_ip6[0] == skops->remote_ip6[0] && (bpf_ntohl(skops->local_ip6[1]) & 0xfff00000) == (bpf_ntohl(skops->remote_ip6[1]) & 0xfff00000)) { switch (op) { case BPF_SOCK_OPS_NEEDS_ECN: rv = 1; break; case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: rv = bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION, cong, sizeof(cong)); break; case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: rv = bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION, cong, sizeof(cong)); break; default: rv = -1; } } else { rv = -1; } #ifdef DEBUG bpf_printk("Returning %d\n", rv); #endif skops->reply = rv; return 1; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/tcp_cong_kern.c
#include "vmlinux.h" #include <linux/version.h> #include <bpf/bpf_helpers.h> SEC("kprobe/open_ctree") int bpf_prog1(struct pt_regs *ctx) { unsigned long rc = -12; bpf_override_return(ctx, rc); return 0; } char _license[] SEC("license") = "GPL"; u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/tracex7.bpf.c
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <assert.h> #include <linux/bpf.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #include "sock_example.h" #include <unistd.h> #include <arpa/inet.h> int main(int ac, char **argv) { struct bpf_object *obj; struct bpf_program *prog; int map_fd, prog_fd; char filename[256]; int i, sock, err; FILE *f; snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) return 1; prog = bpf_object__next_program(obj, NULL); bpf_program__set_type(prog, BPF_PROG_TYPE_SOCKET_FILTER); err = bpf_object__load(obj); if (err) return 1; prog_fd = bpf_program__fd(prog); map_fd = bpf_object__find_map_fd_by_name(obj, "my_map"); sock = open_raw_sock("lo"); assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd, sizeof(prog_fd)) == 0); f = popen("ping -4 -c5 localhost", "r"); (void) f; for (i = 0; i < 5; i++) { long long tcp_cnt, udp_cnt, icmp_cnt; int key; key = IPPROTO_TCP; assert(bpf_map_lookup_elem(map_fd, &key, &tcp_cnt) == 0); key = IPPROTO_UDP; assert(bpf_map_lookup_elem(map_fd, &key, &udp_cnt) == 0); key = IPPROTO_ICMP; assert(bpf_map_lookup_elem(map_fd, &key, &icmp_cnt) == 0); printf("TCP %lld UDP %lld ICMP %lld bytes\n", tcp_cnt, udp_cnt, icmp_cnt); sleep(1); } return 0; }
linux-master
samples/bpf/sockex1_user.c
/* Copyright (c) 2016 Sargun Dhillon <[email protected]> * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #include "vmlinux.h" #include <string.h> #include <linux/version.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> struct { __uint(type, BPF_MAP_TYPE_HASH); __type(key, struct sockaddr_in); __type(value, struct sockaddr_in); __uint(max_entries, 256); } dnat_map SEC(".maps"); /* kprobe is NOT a stable ABI * kernel functions can be removed, renamed or completely change semantics. * Number of arguments and their positions can change, etc. * In such case this bpf+kprobe example will no longer be meaningful * * This example sits on a syscall, and the syscall ABI is relatively stable * of course, across platforms, and over time, the ABI may change. */ SEC("ksyscall/connect") int BPF_KSYSCALL(bpf_prog1, int fd, struct sockaddr_in *uservaddr, int addrlen) { struct sockaddr_in new_addr, orig_addr = {}; struct sockaddr_in *mapped_addr; if (addrlen > sizeof(orig_addr)) return 0; if (bpf_probe_read_user(&orig_addr, sizeof(orig_addr), uservaddr) != 0) return 0; mapped_addr = bpf_map_lookup_elem(&dnat_map, &orig_addr); if (mapped_addr != NULL) { memcpy(&new_addr, mapped_addr, sizeof(new_addr)); bpf_probe_write_user(uservaddr, &new_addr, sizeof(new_addr)); } return 0; } char _license[] SEC("license") = "GPL"; u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/test_probe_write_user.bpf.c
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <assert.h> #include <unistd.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> int main(int ac, char **argv) { struct sockaddr_in *serv_addr_in, *mapped_addr_in, *tmp_addr_in; struct sockaddr serv_addr, mapped_addr, tmp_addr; int serverfd, serverconnfd, clientfd, map_fd; struct bpf_link *link = NULL; struct bpf_program *prog; struct bpf_object *obj; socklen_t sockaddr_len; char filename[256]; char *ip; serv_addr_in = (struct sockaddr_in *)&serv_addr; mapped_addr_in = (struct sockaddr_in *)&mapped_addr; tmp_addr_in = (struct sockaddr_in *)&tmp_addr; snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); return 0; } prog = bpf_object__find_program_by_name(obj, "bpf_prog1"); if (libbpf_get_error(prog)) { fprintf(stderr, "ERROR: finding a prog in obj file failed\n"); goto cleanup; } /* load BPF program */ if (bpf_object__load(obj)) { fprintf(stderr, "ERROR: loading BPF object file failed\n"); goto cleanup; } map_fd = bpf_object__find_map_fd_by_name(obj, "dnat_map"); if (map_fd < 0) { fprintf(stderr, "ERROR: finding a map in obj file failed\n"); goto cleanup; } link = bpf_program__attach(prog); if (libbpf_get_error(link)) { fprintf(stderr, "ERROR: bpf_program__attach failed\n"); link = NULL; goto cleanup; } assert((serverfd = socket(AF_INET, SOCK_STREAM, 0)) > 0); assert((clientfd = socket(AF_INET, SOCK_STREAM, 0)) > 0); /* Bind server to ephemeral port on lo */ memset(&serv_addr, 0, sizeof(serv_addr)); serv_addr_in->sin_family = AF_INET; serv_addr_in->sin_port = 0; serv_addr_in->sin_addr.s_addr = htonl(INADDR_LOOPBACK); assert(bind(serverfd, &serv_addr, sizeof(serv_addr)) == 0); sockaddr_len = sizeof(serv_addr); assert(getsockname(serverfd, &serv_addr, &sockaddr_len) == 0); ip = 
inet_ntoa(serv_addr_in->sin_addr); printf("Server bound to: %s:%d\n", ip, ntohs(serv_addr_in->sin_port)); memset(&mapped_addr, 0, sizeof(mapped_addr)); mapped_addr_in->sin_family = AF_INET; mapped_addr_in->sin_port = htons(5555); mapped_addr_in->sin_addr.s_addr = inet_addr("255.255.255.255"); assert(!bpf_map_update_elem(map_fd, &mapped_addr, &serv_addr, BPF_ANY)); assert(listen(serverfd, 5) == 0); ip = inet_ntoa(mapped_addr_in->sin_addr); printf("Client connecting to: %s:%d\n", ip, ntohs(mapped_addr_in->sin_port)); assert(connect(clientfd, &mapped_addr, sizeof(mapped_addr)) == 0); sockaddr_len = sizeof(tmp_addr); ip = inet_ntoa(tmp_addr_in->sin_addr); assert((serverconnfd = accept(serverfd, &tmp_addr, &sockaddr_len)) > 0); printf("Server received connection from: %s:%d\n", ip, ntohs(tmp_addr_in->sin_port)); sockaddr_len = sizeof(tmp_addr); assert(getpeername(clientfd, &tmp_addr, &sockaddr_len) == 0); ip = inet_ntoa(tmp_addr_in->sin_addr); printf("Client's peer address: %s:%d\n", ip, ntohs(tmp_addr_in->sin_port)); /* Is the server's getsockname = the socket getpeername */ assert(memcmp(&serv_addr, &tmp_addr, sizeof(struct sockaddr_in)) == 0); cleanup: bpf_link__destroy(link); bpf_object__close(obj); return 0; }
linux-master
samples/bpf/test_probe_write_user_user.c
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com * Copyright (c) 2015 BMW Car IT GmbH */ #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <signal.h> #include <bpf/libbpf.h> #include <bpf/bpf.h> #define MAX_ENTRIES 20 #define MAX_CPU 4 #define MAX_STARS 40 struct cpu_hist { long data[MAX_ENTRIES]; long max; }; static struct cpu_hist cpu_hist[MAX_CPU]; static void stars(char *str, long val, long max, int width) { int i; for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++) str[i] = '*'; if (val > max) str[i - 1] = '+'; str[i] = '\0'; } static void print_hist(void) { char starstr[MAX_STARS]; struct cpu_hist *hist; int i, j; /* clear screen */ printf("\033[2J"); for (j = 0; j < MAX_CPU; j++) { hist = &cpu_hist[j]; /* ignore CPUs without data (maybe offline?) */ if (hist->max == 0) continue; printf("CPU %d\n", j); printf(" latency : count distribution\n"); for (i = 1; i <= MAX_ENTRIES; i++) { stars(starstr, hist->data[i - 1], hist->max, MAX_STARS); printf("%8ld -> %-8ld : %-8ld |%-*s|\n", (1l << i) >> 1, (1l << i) - 1, hist->data[i - 1], MAX_STARS, starstr); } } } static void get_data(int fd) { long key, value; int c, i; for (i = 0; i < MAX_CPU; i++) cpu_hist[i].max = 0; for (c = 0; c < MAX_CPU; c++) { for (i = 0; i < MAX_ENTRIES; i++) { key = c * MAX_ENTRIES + i; bpf_map_lookup_elem(fd, &key, &value); cpu_hist[c].data[i] = value; if (value > cpu_hist[c].max) cpu_hist[c].max = value; } } } int main(int argc, char **argv) { struct bpf_link *links[2]; struct bpf_program *prog; struct bpf_object *obj; char filename[256]; int map_fd, i = 0; snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); return 0; } /* load BPF program */ if (bpf_object__load(obj)) { fprintf(stderr, "ERROR: loading BPF object file failed\n"); goto cleanup; } map_fd = 
bpf_object__find_map_fd_by_name(obj, "my_lat"); if (map_fd < 0) { fprintf(stderr, "ERROR: finding a map in obj file failed\n"); goto cleanup; } bpf_object__for_each_program(prog, obj) { links[i] = bpf_program__attach(prog); if (libbpf_get_error(links[i])) { fprintf(stderr, "ERROR: bpf_program__attach failed\n"); links[i] = NULL; goto cleanup; } i++; } while (1) { get_data(map_fd); print_hist(); sleep(5); } cleanup: for (i--; i >= 0; i--) bpf_link__destroy(links[i]); bpf_object__close(obj); return 0; }
linux-master
samples/bpf/lathist_user.c
// SPDX-License-Identifier: GPL-2.0 #include <stdio.h> #include <assert.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #include "sock_example.h" #include <unistd.h> #include <arpa/inet.h> struct flow_key_record { __be32 src; __be32 dst; union { __be32 ports; __be16 port16[2]; }; __u32 ip_proto; }; struct pair { __u64 packets; __u64 bytes; }; int main(int argc, char **argv) { int i, sock, fd, main_prog_fd, hash_map_fd; struct bpf_program *prog; struct bpf_object *obj; char filename[256]; FILE *f; snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); return 0; } /* load BPF program */ if (bpf_object__load(obj)) { fprintf(stderr, "ERROR: loading BPF object file failed\n"); goto cleanup; } hash_map_fd = bpf_object__find_map_fd_by_name(obj, "hash_map"); if (hash_map_fd < 0) { fprintf(stderr, "ERROR: finding a map in obj file failed\n"); goto cleanup; } /* find BPF main program */ main_prog_fd = 0; bpf_object__for_each_program(prog, obj) { fd = bpf_program__fd(prog); if (!strcmp(bpf_program__name(prog), "main_prog")) main_prog_fd = fd; } if (main_prog_fd == 0) { fprintf(stderr, "ERROR: can't find main_prog\n"); goto cleanup; } sock = open_raw_sock("lo"); /* attach BPF program to socket */ assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &main_prog_fd, sizeof(__u32)) == 0); if (argc > 1) f = popen("ping -4 -c5 localhost", "r"); else f = popen("netperf -l 4 localhost", "r"); (void) f; for (i = 0; i < 5; i++) { struct flow_key_record key = {}, next_key; struct pair value; sleep(1); printf("IP src.port -> dst.port bytes packets\n"); while (bpf_map_get_next_key(hash_map_fd, &key, &next_key) == 0) { bpf_map_lookup_elem(hash_map_fd, &next_key, &value); printf("%s.%05d -> %s.%05d %12lld %12lld\n", inet_ntoa((struct in_addr){htonl(next_key.src)}), next_key.port16[0], inet_ntoa((struct in_addr){htonl(next_key.dst)}), next_key.port16[1], 
value.bytes, value.packets); key = next_key; } } cleanup: bpf_object__close(obj); return 0; }
linux-master
samples/bpf/sockex3_user.c
/* Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #include "vmlinux.h" #include <linux/version.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> SEC("kprobe/__set_task_comm") int prog(struct pt_regs *ctx) { struct signal_struct *signal; struct task_struct *tsk; char oldcomm[TASK_COMM_LEN] = {}; char newcomm[TASK_COMM_LEN] = {}; u16 oom_score_adj; u32 pid; tsk = (void *)PT_REGS_PARM1_CORE(ctx); pid = BPF_CORE_READ(tsk, pid); bpf_core_read_str(oldcomm, sizeof(oldcomm), &tsk->comm); bpf_core_read_str(newcomm, sizeof(newcomm), (void *)PT_REGS_PARM2(ctx)); signal = BPF_CORE_READ(tsk, signal); oom_score_adj = BPF_CORE_READ(signal, oom_score_adj); return 0; } SEC("kprobe/fib_table_lookup") int prog2(struct pt_regs *ctx) { return 0; } char _license[] SEC("license") = "GPL"; u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/test_overhead_kprobe.bpf.c
/* Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #include "vmlinux.h" #include <bpf/bpf_helpers.h> /* from /sys/kernel/tracing/events/task/task_rename/format */ SEC("tracepoint/task/task_rename") int prog(struct trace_event_raw_task_rename *ctx) { return 0; } /* from /sys/kernel/tracing/events/fib/fib_table_lookup/format */ SEC("tracepoint/fib/fib_table_lookup") int prog2(struct trace_event_raw_fib_table_lookup *ctx) { return 0; } char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/test_overhead_tp.bpf.c
/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #include "vmlinux.h" #include <linux/version.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> struct start_key { dev_t dev; u32 _pad; sector_t sector; }; struct { __uint(type, BPF_MAP_TYPE_HASH); __type(key, long); __type(value, u64); __uint(max_entries, 4096); } my_map SEC(".maps"); /* from /sys/kernel/tracing/events/block/block_io_start/format */ SEC("tracepoint/block/block_io_start") int bpf_prog1(struct trace_event_raw_block_rq *ctx) { u64 val = bpf_ktime_get_ns(); struct start_key key = { .dev = ctx->dev, .sector = ctx->sector }; bpf_map_update_elem(&my_map, &key, &val, BPF_ANY); return 0; } static unsigned int log2l(unsigned long long n) { #define S(k) if (n >= (1ull << k)) { i += k; n >>= k; } int i = -(n == 0); S(32); S(16); S(8); S(4); S(2); S(1); return i; #undef S } #define SLOTS 100 struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __uint(key_size, sizeof(u32)); __uint(value_size, sizeof(u64)); __uint(max_entries, SLOTS); } lat_map SEC(".maps"); /* from /sys/kernel/tracing/events/block/block_io_done/format */ SEC("tracepoint/block/block_io_done") int bpf_prog2(struct trace_event_raw_block_rq *ctx) { struct start_key key = { .dev = ctx->dev, .sector = ctx->sector }; u64 *value, l, base; u32 index; value = bpf_map_lookup_elem(&my_map, &key); if (!value) return 0; u64 cur_time = bpf_ktime_get_ns(); u64 delta = cur_time - *value; bpf_map_delete_elem(&my_map, &key); /* the lines below are computing index = log10(delta)*10 * using integer arithmetic * index = 29 ~ 1 usec * index = 59 ~ 1 msec * index = 89 ~ 1 sec * index = 99 ~ 10sec or more * log10(x)*10 = log2(x)*10/log2(10) = log2(x)*3 */ l = log2l(delta); base = 1ll << l; index = (l * 64 + (delta - base) * 64 / base) * 3 / 64; if (index >= SLOTS) 
index = SLOTS - 1; value = bpf_map_lookup_elem(&lat_map, &index); if (value) *value += 1; return 0; } char _license[] SEC("license") = "GPL"; u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/tracex3.bpf.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/version.h> #include <linux/ptrace.h> #include <uapi/linux/bpf.h> #include <bpf/bpf_helpers.h> /* * The CPU number, cstate number and pstate number are based * on 96boards Hikey with octa CA53 CPUs. * * Every CPU have three idle states for cstate: * WFI, CPU_OFF, CLUSTER_OFF * * Every CPU have 5 operating points: * 208MHz, 432MHz, 729MHz, 960MHz, 1200MHz * * This code is based on these assumption and other platforms * need to adjust these definitions. */ #define MAX_CPU 8 #define MAX_PSTATE_ENTRIES 5 #define MAX_CSTATE_ENTRIES 3 static int cpu_opps[] = { 208000, 432000, 729000, 960000, 1200000 }; /* * my_map structure is used to record cstate and pstate index and * timestamp (Idx, Ts), when new event incoming we need to update * combination for new state index and timestamp (Idx`, Ts`). * * Based on (Idx, Ts) and (Idx`, Ts`) we can calculate the time * interval for the previous state: Duration(Idx) = Ts` - Ts. * * Every CPU has one below array for recording state index and * timestamp, and record for cstate and pstate saperately: * * +--------------------------+ * | cstate timestamp | * +--------------------------+ * | cstate index | * +--------------------------+ * | pstate timestamp | * +--------------------------+ * | pstate index | * +--------------------------+ */ #define MAP_OFF_CSTATE_TIME 0 #define MAP_OFF_CSTATE_IDX 1 #define MAP_OFF_PSTATE_TIME 2 #define MAP_OFF_PSTATE_IDX 3 #define MAP_OFF_NUM 4 struct { __uint(type, BPF_MAP_TYPE_ARRAY); __type(key, u32); __type(value, u64); __uint(max_entries, MAX_CPU * MAP_OFF_NUM); } my_map SEC(".maps"); /* cstate_duration records duration time for every idle state per CPU */ struct { __uint(type, BPF_MAP_TYPE_ARRAY); __type(key, u32); __type(value, u64); __uint(max_entries, MAX_CPU * MAX_CSTATE_ENTRIES); } cstate_duration SEC(".maps"); /* pstate_duration records duration time for every operating point per CPU */ struct { __uint(type, BPF_MAP_TYPE_ARRAY); __type(key, 
u32); __type(value, u64); __uint(max_entries, MAX_CPU * MAX_PSTATE_ENTRIES); } pstate_duration SEC(".maps"); /* * The trace events for cpu_idle and cpu_frequency are taken from: * /sys/kernel/tracing/events/power/cpu_idle/format * /sys/kernel/tracing/events/power/cpu_frequency/format * * These two events have same format, so define one common structure. */ struct cpu_args { u64 pad; u32 state; u32 cpu_id; }; /* calculate pstate index, returns MAX_PSTATE_ENTRIES for failure */ static u32 find_cpu_pstate_idx(u32 frequency) { u32 i; for (i = 0; i < sizeof(cpu_opps) / sizeof(u32); i++) { if (frequency == cpu_opps[i]) return i; } return i; } SEC("tracepoint/power/cpu_idle") int bpf_prog1(struct cpu_args *ctx) { u64 *cts, *pts, *cstate, *pstate, prev_state, cur_ts, delta; u32 key, cpu, pstate_idx; u64 *val; if (ctx->cpu_id > MAX_CPU) return 0; cpu = ctx->cpu_id; key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_TIME; cts = bpf_map_lookup_elem(&my_map, &key); if (!cts) return 0; key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_IDX; cstate = bpf_map_lookup_elem(&my_map, &key); if (!cstate) return 0; key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_TIME; pts = bpf_map_lookup_elem(&my_map, &key); if (!pts) return 0; key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_IDX; pstate = bpf_map_lookup_elem(&my_map, &key); if (!pstate) return 0; prev_state = *cstate; *cstate = ctx->state; if (!*cts) { *cts = bpf_ktime_get_ns(); return 0; } cur_ts = bpf_ktime_get_ns(); delta = cur_ts - *cts; *cts = cur_ts; /* * When state doesn't equal to (u32)-1, the cpu will enter * one idle state; for this case we need to record interval * for the pstate. 
* * OPP2 * +---------------------+ * OPP1 | | * ---------+ | * | Idle state * +--------------- * * |<- pstate duration ->| * ^ ^ * pts cur_ts */ if (ctx->state != (u32)-1) { /* record pstate after have first cpu_frequency event */ if (!*pts) return 0; delta = cur_ts - *pts; pstate_idx = find_cpu_pstate_idx(*pstate); if (pstate_idx >= MAX_PSTATE_ENTRIES) return 0; key = cpu * MAX_PSTATE_ENTRIES + pstate_idx; val = bpf_map_lookup_elem(&pstate_duration, &key); if (val) __sync_fetch_and_add((long *)val, delta); /* * When state equal to (u32)-1, the cpu just exits from one * specific idle state; for this case we need to record * interval for the pstate. * * OPP2 * -----------+ * | OPP1 * | +----------- * | Idle state | * +---------------------+ * * |<- cstate duration ->| * ^ ^ * cts cur_ts */ } else { key = cpu * MAX_CSTATE_ENTRIES + prev_state; val = bpf_map_lookup_elem(&cstate_duration, &key); if (val) __sync_fetch_and_add((long *)val, delta); } /* Update timestamp for pstate as new start time */ if (*pts) *pts = cur_ts; return 0; } SEC("tracepoint/power/cpu_frequency") int bpf_prog2(struct cpu_args *ctx) { u64 *pts, *cstate, *pstate, prev_state, cur_ts, delta; u32 key, cpu, pstate_idx; u64 *val; cpu = ctx->cpu_id; key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_TIME; pts = bpf_map_lookup_elem(&my_map, &key); if (!pts) return 0; key = cpu * MAP_OFF_NUM + MAP_OFF_PSTATE_IDX; pstate = bpf_map_lookup_elem(&my_map, &key); if (!pstate) return 0; key = cpu * MAP_OFF_NUM + MAP_OFF_CSTATE_IDX; cstate = bpf_map_lookup_elem(&my_map, &key); if (!cstate) return 0; prev_state = *pstate; *pstate = ctx->state; if (!*pts) { *pts = bpf_ktime_get_ns(); return 0; } cur_ts = bpf_ktime_get_ns(); delta = cur_ts - *pts; *pts = cur_ts; /* When CPU is in idle, bail out to skip pstate statistics */ if (*cstate != (u32)(-1)) return 0; /* * The cpu changes to another different OPP (in below diagram * change frequency from OPP3 to OPP1), need recording interval * for previous frequency OPP3 and update 
timestamp as start * time for new frequency OPP1. * * OPP3 * +---------------------+ * OPP2 | | * ---------+ | * | OPP1 * +--------------- * * |<- pstate duration ->| * ^ ^ * pts cur_ts */ pstate_idx = find_cpu_pstate_idx(*pstate); if (pstate_idx >= MAX_PSTATE_ENTRIES) return 0; key = cpu * MAX_PSTATE_ENTRIES + pstate_idx; val = bpf_map_lookup_elem(&pstate_duration, &key); if (val) __sync_fetch_and_add((long *)val, delta); return 0; } char _license[] SEC("license") = "GPL"; u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/cpustat_kern.c
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
#include <bpf/bpf_helpers.h>
#include "bpf_legacy.h"

/* Per-IP-protocol byte counters, indexed by ip->protocol (0..255). */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 256);
} my_map SEC(".maps");

SEC("socket1")
int bpf_prog1(struct __sk_buff *skb)
{
	long *bytes;
	int proto;

	/* Only account packets this host transmits. */
	if (skb->pkt_type != PACKET_OUTGOING)
		return 0;

	/*
	 * IPv4 is assumed: the protocol byte sits at a fixed offset
	 * right after the Ethernet header.
	 */
	proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));

	bytes = bpf_map_lookup_elem(&my_map, &proto);
	if (bytes)
		__sync_fetch_and_add(bytes, skb->len);

	return 0;
}

char _license[] SEC("license") = "GPL";
linux-master
samples/bpf/sockex1_kern.c
/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include "vmlinux.h"
#include "net_shared.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

/* kprobe is NOT a stable ABI
 * kernel functions can be removed, renamed or completely change semantics.
 * Number of arguments and their positions can change, etc.
 * In such case this bpf+kprobe example will no longer be meaningful
 */
SEC("kprobe.multi/__netif_receive_skb_core*")
int bpf_prog1(struct pt_regs *ctx)
{
	/* attaches to kprobe __netif_receive_skb_core,
	 * looks for packets on loopback device and prints them
	 * (wildcard is used for avoiding symbol mismatch due to optimization)
	 */
	char devname[IFNAMSIZ];
	struct net_device *dev;
	struct sk_buff *skb;
	int len;

	/* PARM1 is presumably a struct sk_buff ** (the probed function's
	 * first argument); read the skb pointer through it -- confirm
	 * against the __netif_receive_skb_core prototype.
	 */
	bpf_core_read(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
	dev = BPF_CORE_READ(skb, dev);
	len = BPF_CORE_READ(skb, len);

	BPF_CORE_READ_STR_INTO(&devname, dev, name);

	/* cheap prefix check: matches any device name starting with "lo" */
	if (devname[0] == 'l' && devname[1] == 'o') {
		char fmt[] = "skb %p len %d\n";
		/* using bpf_trace_printk() for DEBUG ONLY */
		bpf_trace_printk(fmt, sizeof(fmt),
				 skb, len);
	}

	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/tracex1.bpf.c
/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2015 BMW Car IT GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/version.h>
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_ENTRIES	20
#define MAX_CPU		4

/* We need to stick to static allocated memory (an array instead of
 * hash table) because managing dynamic memory from the
 * trace_preempt_[on|off] tracepoints hooks is not supported.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, u64);
	__uint(max_entries, MAX_CPU);
} my_map SEC(".maps");

/* Record the timestamp at which this CPU disabled preemption. */
SEC("kprobe/trace_preempt_off")
int bpf_prog1(struct pt_regs *ctx)
{
	int cpu = bpf_get_smp_processor_id();
	u64 *ts = bpf_map_lookup_elem(&my_map, &cpu);

	if (ts)
		*ts = bpf_ktime_get_ns();

	return 0;
}

/* Branchless floor(log2(v)) for 32-bit v: each step tests whether the
 * top half of the remaining bits is non-zero, accumulates the shift
 * amount into r, and narrows v accordingly.
 */
static unsigned int log2(unsigned int v)
{
	unsigned int r;
	unsigned int shift;

	r = (v > 0xFFFF) << 4;
	v >>= r;
	shift = (v > 0xFF) << 3;
	v >>= shift;
	r |= shift;
	shift = (v > 0xF) << 2;
	v >>= shift;
	r |= shift;
	shift = (v > 0x3) << 1;
	v >>= shift;
	r |= shift;
	r |= (v >> 1);

	return r;
}

/* floor(log2) for 64-bit values, delegating to the 32-bit helper. */
static unsigned int log2l(unsigned long v)
{
	unsigned int hi = v >> 32;

	if (hi)
		return log2(hi) + 32;
	else
		return log2(v);
}

/* Per-CPU histogram of preempt-off durations, bucketed by log2(ns). */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, long);
	__uint(max_entries, MAX_CPU * MAX_ENTRIES);
} my_lat SEC(".maps");

/* On preempt re-enable, bucket the elapsed time since bpf_prog1 ran. */
SEC("kprobe/trace_preempt_on")
int bpf_prog2(struct pt_regs *ctx)
{
	u64 *ts, cur_ts, delta;
	int key, cpu;
	long *val;

	cpu = bpf_get_smp_processor_id();
	ts = bpf_map_lookup_elem(&my_map, &cpu);
	if (!ts)
		return 0;

	cur_ts = bpf_ktime_get_ns();
	/* delta here is the log2 bucket index, not the raw nanoseconds */
	delta = log2l(cur_ts - *ts);

	if (delta > MAX_ENTRIES - 1)
		delta = MAX_ENTRIES - 1;

	key = cpu * MAX_ENTRIES + delta;
	val = bpf_map_lookup_elem(&my_lat, &key);
	if (val)
		__sync_fetch_and_add((long *)val, 1);

	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/lathist_kern.c
/* eBPF example program:
 * - creates arraymap in kernel with key 4 bytes and value 8 bytes
 *
 * - loads eBPF program:
 *   r0 = skb->data[ETH_HLEN + offsetof(struct iphdr, protocol)];
 *   *(u32*)(fp - 4) = r0;
 *   // assuming packet is IPv4, lookup ip->proto in a map
 *   value = bpf_map_lookup_elem(map_fd, fp - 4);
 *   if (value)
 *        (*(u64*)value) += 1;
 *
 * - attaches this program to loopback interface "lo" raw socket
 *
 * - every second user space reads map[tcp], map[udp], map[icmp] to see
 *   how many packets of given protocol were seen on "lo"
 */
#include <stdio.h>
#include <unistd.h>
#include <assert.h>
#include <linux/bpf.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <stddef.h>
#include <bpf/bpf.h>
#include "bpf_insn.h"
#include "sock_example.h"
#include "bpf_util.h"

char bpf_log_buf[BPF_LOG_BUF_SIZE];

/* Build, load and attach the hand-assembled packet counter, then poll
 * the per-protocol counters for ten seconds.  Always returns 0; failed
 * steps just skip to cleanup and rely on process exit to release fds.
 */
static int test_sock(void)
{
	int sock = -1, map_fd, prog_fd, i, key;
	long long value = 0, tcp_cnt, udp_cnt, icmp_cnt;

	map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(key), sizeof(value),
				256, NULL);
	if (map_fd < 0) {
		printf("failed to create map '%s'\n", strerror(errno));
		goto cleanup;
	}

	/* Raw eBPF instruction stream mirroring the pseudo-code in the
	 * file header comment above; map_fd is embedded via BPF_LD_MAP_FD.
	 */
	struct bpf_insn prog[] = {
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
		BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol) /* R0 = ip->proto */),
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
		BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt = ARRAY_SIZE(prog);
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = bpf_log_buf,
		.log_size = BPF_LOG_BUF_SIZE,
	);

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
				prog, insns_cnt, &opts);
	if (prog_fd < 0) {
		printf("failed to load prog '%s'\n", strerror(errno));
		goto cleanup;
	}

	/* NOTE(review): open_raw_sock()'s return value is not checked;
	 * a failure surfaces as the setsockopt() error below.
	 */
	sock = open_raw_sock("lo");

	if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
		       sizeof(prog_fd)) < 0) {
		printf("setsockopt %s\n", strerror(errno));
		goto cleanup;
	}

	for (i = 0; i < 10; i++) {
		key = IPPROTO_TCP;
		assert(bpf_map_lookup_elem(map_fd, &key, &tcp_cnt) == 0);

		key = IPPROTO_UDP;
		assert(bpf_map_lookup_elem(map_fd, &key, &udp_cnt) == 0);

		key = IPPROTO_ICMP;
		assert(bpf_map_lookup_elem(map_fd, &key, &icmp_cnt) == 0);

		printf("TCP %lld UDP %lld ICMP %lld packets\n",
		       tcp_cnt, udp_cnt, icmp_cnt);
		sleep(1);
	}

cleanup:
	/* maps, programs, raw sockets will auto cleanup on process exit */
	return 0;
}

/* Kick off background loopback traffic for the counters to see, then
 * run the test.  The popen stream is deliberately never pclose()d;
 * the ping exits on its own and the process is short-lived.
 */
int main(void)
{
	FILE *f;

	f = popen("ping -4 -c5 localhost", "r");
	(void)f;

	return test_sock();
}
linux-master
samples/bpf/sock_example.c
#define _GNU_SOURCE #include <stdio.h> #include <unistd.h> #include <bpf/libbpf.h> int main(int argc, char **argv) { struct bpf_link *link = NULL; struct bpf_program *prog; struct bpf_object *obj; char filename[256]; char command[256]; int ret = 0; FILE *f; if (!argv[1]) { fprintf(stderr, "ERROR: Run with the btrfs device argument!\n"); return 0; } snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]); obj = bpf_object__open_file(filename, NULL); if (libbpf_get_error(obj)) { fprintf(stderr, "ERROR: opening BPF object file failed\n"); return 0; } prog = bpf_object__find_program_by_name(obj, "bpf_prog1"); if (!prog) { fprintf(stderr, "ERROR: finding a prog in obj file failed\n"); goto cleanup; } /* load BPF program */ if (bpf_object__load(obj)) { fprintf(stderr, "ERROR: loading BPF object file failed\n"); goto cleanup; } link = bpf_program__attach(prog); if (libbpf_get_error(link)) { fprintf(stderr, "ERROR: bpf_program__attach failed\n"); link = NULL; goto cleanup; } snprintf(command, 256, "mount %s tmpmnt/", argv[1]); f = popen(command, "r"); ret = pclose(f); cleanup: bpf_link__destroy(link); bpf_object__close(obj); return ret ? 0 : 1; }
linux-master
samples/bpf/tracex7_user.c
/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include "vmlinux.h"
#include <errno.h>
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

#define MAX_ENTRIES 1000
#define MAX_NR_CPUS 1024

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
} lru_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_COMMON_LRU);
} nocommon_lru_hash_map SEC(".maps");

struct inner_lru {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NUMA_NODE);
	__uint(numa_node, 0);
} inner_lru_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_NR_CPUS);
	__uint(key_size, sizeof(u32));
	__array(values, struct inner_lru); /* use inner_lru as inner map */
} array_of_lru_hashs SEC(".maps") = {
	/* statically initialize the first element */
	.values = { &inner_lru_hash_map },
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
} percpu_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} percpu_hash_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(key_size, 8);
	__uint(value_size, sizeof(long));
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} lpm_trie_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} array_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} lru_hash_lookup_map SEC(".maps");

/* Stress update/lookup/delete on a preallocated hash map. */
SEC("ksyscall/getuid")
int BPF_KSYSCALL(stress_hmap)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;
	int i;

	for (i = 0; i < 10; i++) {
		bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
		value = bpf_map_lookup_elem(&hash_map, &key);
		if (value)
			bpf_map_delete_elem(&hash_map, &key);
	}

	return 0;
}

/* Same cycle against the per-CPU hash map. */
SEC("ksyscall/geteuid")
int BPF_KSYSCALL(stress_percpu_hmap)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;
	int i;

	for (i = 0; i < 10; i++) {
		bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
		value = bpf_map_lookup_elem(&percpu_hash_map, &key);
		if (value)
			bpf_map_delete_elem(&percpu_hash_map, &key);
	}
	return 0;
}

/* Same cycle against the no-prealloc hash map (exercises the allocator). */
SEC("ksyscall/getgid")
int BPF_KSYSCALL(stress_hmap_alloc)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;
	int i;

	for (i = 0; i < 10; i++) {
		bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
		value = bpf_map_lookup_elem(&hash_map_alloc, &key);
		if (value)
			bpf_map_delete_elem(&hash_map_alloc, &key);
	}
	return 0;
}

/* Same cycle against the no-prealloc per-CPU hash map. */
SEC("ksyscall/getegid")
int BPF_KSYSCALL(stress_percpu_hmap_alloc)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;
	int i;

	for (i = 0; i < 10; i++) {
		bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
		value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
		if (value)
			bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
	}
	return 0;
}

/*
 * Driven from userspace via connect(): the test parameters are smuggled
 * inside the sockaddr_in6 address (magic markers + test case number).
 */
SEC("ksyscall/connect")
int BPF_KSYSCALL(stress_lru_hmap_alloc, int fd, struct sockaddr_in *uservaddr,
		 int addrlen)
{
	/* Fix: the message previously ended in a literal 'n' because the
	 * backslash of the "\n" escape was missing.
	 */
	char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
	union {
		u16 dst6[8];
		struct {
			u16 magic0;
			u16 magic1;
			u16 tcase;
			u16 unused16;
			u32 unused32;
			u32 key;
		};
	} test_params;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)uservaddr;
	u16 test_case;
	long val = 1;
	u32 key = 0;
	int ret;

	if (addrlen != sizeof(*in6))
		return 0;

	ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
				  &in6->sin6_addr);
	if (ret)
		goto done;

	/* Ignore connect() calls that are not from the test harness. */
	if (test_params.magic0 != 0xdead ||
	    test_params.magic1 != 0xbeef)
		return 0;

	test_case = test_params.tcase;
	if (test_case != 3)
		key = bpf_get_prandom_u32();

	if (test_case == 0) {
		ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
	} else if (test_case == 1) {
		ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 2) {
		void *nolocal_lru_map;
		int cpu = bpf_get_smp_processor_id();

		nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs,
						      &cpu);
		if (!nolocal_lru_map) {
			ret = -ENOENT;
			goto done;
		}

		ret = bpf_map_update_elem(nolocal_lru_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 3) {
		u32 i;

		key = test_params.key;

#pragma clang loop unroll(full)
		for (i = 0; i < 32; i++) {
			bpf_map_lookup_elem(&lru_hash_lookup_map, &key);
			key++;
		}
	} else {
		ret = -EINVAL;
	}

done:
	if (ret)
		bpf_trace_printk(fmt, sizeof(fmt), ret);

	return 0;
}

/* Stress LPM trie lookups with a fixed /32 key (192.168.0.1). */
SEC("ksyscall/gettid")
int BPF_KSYSCALL(stress_lpm_trie_map_alloc)
{
	union {
		u32 b32[2];
		u8 b8[8];
	} key;
	unsigned int i;

	key.b32[0] = 32;
	key.b8[4] = 192;
	key.b8[5] = 168;
	key.b8[6] = 0;
	key.b8[7] = 1;

#pragma clang loop unroll(full)
	for (i = 0; i < 32; ++i)
		bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);

	return 0;
}

/* Pure lookup throughput on the preallocated hash map. */
SEC("ksyscall/getpgid")
int BPF_KSYSCALL(stress_hash_map_lookup)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&hash_map, &key);

	return 0;
}

/* Pure lookup throughput on the array map. */
SEC("ksyscall/getppid")
int BPF_KSYSCALL(stress_array_map_lookup)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&array_map, &key);

	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
linux-master
samples/bpf/map_perf_test.bpf.c
// SPDX-License-Identifier: GPL-2.0-only /* * Sample fifo dma implementation * * Copyright (C) 2010 Stefani Seibold <[email protected]> */ #include <linux/init.h> #include <linux/module.h> #include <linux/kfifo.h> /* * This module shows how to handle fifo dma operations. */ /* fifo size in elements (bytes) */ #define FIFO_SIZE 32 static struct kfifo fifo; static int __init example_init(void) { int i; unsigned int ret; unsigned int nents; struct scatterlist sg[10]; printk(KERN_INFO "DMA fifo test start\n"); if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) { printk(KERN_WARNING "error kfifo_alloc\n"); return -ENOMEM; } printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo)); kfifo_in(&fifo, "test", 4); for (i = 0; i != 9; i++) kfifo_put(&fifo, i); /* kick away first byte */ kfifo_skip(&fifo); printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo)); /* * Configure the kfifo buffer to receive data from DMA input. * * .--------------------------------------. * | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 | * |---|------------------|---------------| * \_/ \________________/ \_____________/ * \ \ \ * \ \_allocated data \ * \_*free space* \_*free space* * * We need two different SG entries: one for the free space area at the * end of the kfifo buffer (19 bytes) and another for the first free * byte at the beginning, after the kfifo_skip(). */ sg_init_table(sg, ARRAY_SIZE(sg)); nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); printk(KERN_INFO "DMA sgl entries: %d\n", nents); if (!nents) { /* fifo is full and no sgl was created */ printk(KERN_WARNING "error kfifo_dma_in_prepare\n"); return -EIO; } /* receive data */ printk(KERN_INFO "scatterlist for receive:\n"); for (i = 0; i < nents; i++) { printk(KERN_INFO "sg[%d] -> " "page %p offset 0x%.8x length 0x%.8x\n", i, sg_page(&sg[i]), sg[i].offset, sg[i].length); if (sg_is_last(&sg[i])) break; } /* put here your code to setup and exectute the dma operation */ /* ... 
*/ /* example: zero bytes received */ ret = 0; /* finish the dma operation and update the received data */ kfifo_dma_in_finish(&fifo, ret); /* Prepare to transmit data, example: 8 bytes */ nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); printk(KERN_INFO "DMA sgl entries: %d\n", nents); if (!nents) { /* no data was available and no sgl was created */ printk(KERN_WARNING "error kfifo_dma_out_prepare\n"); return -EIO; } printk(KERN_INFO "scatterlist for transmit:\n"); for (i = 0; i < nents; i++) { printk(KERN_INFO "sg[%d] -> " "page %p offset 0x%.8x length 0x%.8x\n", i, sg_page(&sg[i]), sg[i].offset, sg[i].length); if (sg_is_last(&sg[i])) break; } /* put here your code to setup and exectute the dma operation */ /* ... */ /* example: 5 bytes transmitted */ ret = 5; /* finish the dma operation and update the transmitted data */ kfifo_dma_out_finish(&fifo, ret); ret = kfifo_len(&fifo); printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo)); if (ret != 7) { printk(KERN_WARNING "size mismatch: test failed"); return -EIO; } printk(KERN_INFO "test passed\n"); return 0; } static void __exit example_exit(void) { kfifo_free(&fifo); } module_init(example_init); module_exit(example_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Stefani Seibold <[email protected]>");
linux-master
samples/kfifo/dma-example.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Sample kfifo int type implementation
 *
 * Copyright (C) 2010 Stefani Seibold <[email protected]>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/kfifo.h>

/*
 * This module shows how to create a int type fifo.
 */

/* fifo size in elements (ints) */
#define FIFO_SIZE	32

/* name of the proc entry */
#define	PROC_FIFO	"int-fifo"

/* lock for procfs read access */
static DEFINE_MUTEX(read_access);

/* lock for procfs write access */
static DEFINE_MUTEX(write_access);

/*
 * define DYNAMIC in this example for a dynamically allocated fifo.
 *
 * Otherwise the fifo storage will be a part of the fifo structure.
 */
#if 0
#define DYNAMIC
#endif

#ifdef DYNAMIC
static DECLARE_KFIFO_PTR(test, int);
#else
static DEFINE_KFIFO(test, int, FIFO_SIZE);
#endif

/* contents the fifo must hold after the exact sequence of operations
 * performed by testfunc() below; any deviation fails the self-test
 */
static const int expected_result[FIFO_SIZE] = {
	 3,  4,  5,  6,  7,  8,  9,  0,
	 1, 20, 21, 22, 23, 24, 25, 26,
	27, 28, 29, 30, 31, 32, 33, 34,
	35, 36, 37, 38, 39, 40, 41, 42,
};

/* exercise the int-typed kfifo API; returns 0 on success, -EIO on mismatch */
static int __init testfunc(void)
{
	int		buf[6];
	int		i, j;
	unsigned int	ret;

	printk(KERN_INFO "int fifo test start\n");

	/* put values into the fifo */
	for (i = 0; i != 10; i++)
		kfifo_put(&test, i);

	/* show the number of used elements */
	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));

	/* get max of 2 elements from the fifo */
	ret = kfifo_out(&test, buf, 2);
	printk(KERN_INFO "ret: %d\n", ret);
	/* and put it back to the end of the fifo */
	ret = kfifo_in(&test, buf, ret);
	printk(KERN_INFO "ret: %d\n", ret);

	/* skip first element of the fifo */
	printk(KERN_INFO "skip 1st element\n");
	kfifo_skip(&test);

	/* put values into the fifo until is full */
	for (i = 20; kfifo_put(&test, i); i++)
		;

	printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));

	/* show the first value without removing from the fifo */
	if (kfifo_peek(&test, &i))
		printk(KERN_INFO "%d\n", i);

	/* check the correctness of all values in the fifo */
	j = 0;
	while (kfifo_get(&test, &i)) {
		printk(KERN_INFO "item = %d\n", i);
		if (i != expected_result[j++]) {
			printk(KERN_WARNING "value mismatch: test failed\n");
			return -EIO;
		}
	}
	if (j != ARRAY_SIZE(expected_result)) {
		printk(KERN_WARNING "size mismatch: test failed\n");
		return -EIO;
	}
	printk(KERN_INFO "test passed\n");

	return 0;
}

/* procfs write: feed user data into the fifo, serialized by write_access */
static ssize_t fifo_write(struct file *file, const char __user *buf,
						size_t count, loff_t *ppos)
{
	int ret;
	unsigned int copied;

	if (mutex_lock_interruptible(&write_access))
		return -ERESTARTSYS;

	ret = kfifo_from_user(&test, buf, count, &copied);

	mutex_unlock(&write_access);
	if (ret)
		return ret;

	return copied;
}

/* procfs read: drain the fifo to user space, serialized by read_access */
static ssize_t fifo_read(struct file *file, char __user *buf,
						size_t count, loff_t *ppos)
{
	int ret;
	unsigned int copied;

	if (mutex_lock_interruptible(&read_access))
		return -ERESTARTSYS;

	ret = kfifo_to_user(&test, buf, count, &copied);

	mutex_unlock(&read_access);
	if (ret)
		return ret;

	return copied;
}

static const struct proc_ops fifo_proc_ops = {
	.proc_read	= fifo_read,
	.proc_write	= fifo_write,
	.proc_lseek	= noop_llseek,
};

/* run the self-test, then expose the fifo through /proc/int-fifo */
static int __init example_init(void)
{
#ifdef DYNAMIC
	int ret;

	ret = kfifo_alloc(&test, FIFO_SIZE, GFP_KERNEL);
	if (ret) {
		printk(KERN_ERR "error kfifo_alloc\n");
		return ret;
	}
#endif
	if (testfunc() < 0) {
#ifdef DYNAMIC
		kfifo_free(&test);
#endif
		return -EIO;
	}

	if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
#ifdef DYNAMIC
		kfifo_free(&test);
#endif
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry(PROC_FIFO, NULL);
#ifdef DYNAMIC
	kfifo_free(&test);
#endif
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefani Seibold <[email protected]>");
linux-master
samples/kfifo/inttype-example.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Sample dynamic sized record fifo implementation
 *
 * Copyright (C) 2010 Stefani Seibold <[email protected]>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/kfifo.h>

/*
 * This module shows how to create a variable sized record fifo.
 */

/* fifo size in elements (bytes) */
#define FIFO_SIZE	128

/* name of the proc entry */
#define	PROC_FIFO	"record-fifo"

/* lock for procfs read access */
static DEFINE_MUTEX(read_access);

/* lock for procfs write access */
static DEFINE_MUTEX(write_access);

/*
 * define DYNAMIC in this example for a dynamically allocated fifo.
 *
 * Otherwise the fifo storage will be a part of the fifo structure.
 */
#if 0
#define DYNAMIC
#endif

/*
 * struct kfifo_rec_ptr_1 and  STRUCT_KFIFO_REC_1 can handle records of a
 * length between 0 and 255 bytes.
 *
 * struct kfifo_rec_ptr_2 and  STRUCT_KFIFO_REC_2 can handle records of a
 * length between 0 and 65535 bytes.
 */

#ifdef DYNAMIC
struct kfifo_rec_ptr_1 test;

#else
typedef STRUCT_KFIFO_REC_1(FIFO_SIZE) mytest;

static mytest test;
#endif

/* records the fifo must yield after testfunc()'s exact operation
 * sequence ("hello" is skipped, then 10 runs of repeated letters)
 */
static const char *expected_result[] = {
	"a",
	"bb",
	"ccc",
	"dddd",
	"eeeee",
	"ffffff",
	"ggggggg",
	"hhhhhhhh",
	"iiiiiiiii",
	"jjjjjjjjjj",
};

/* exercise the record-fifo API; returns 0 on success, -EIO on mismatch */
static int __init testfunc(void)
{
	char		buf[100];
	unsigned int	i;
	unsigned int	ret;
	struct { unsigned char buf[6]; } hello = { "hello" };

	printk(KERN_INFO "record fifo test start\n");

	kfifo_in(&test, &hello, sizeof(hello));

	/* show the size of the next record in the fifo */
	printk(KERN_INFO "fifo peek len: %u\n", kfifo_peek_len(&test));

	/* put in variable length data */
	for (i = 0; i < 10; i++) {
		memset(buf, 'a' + i, i + 1);
		kfifo_in(&test, buf, i + 1);
	}

	/* skip first element of the fifo */
	printk(KERN_INFO "skip 1st element\n");
	kfifo_skip(&test);

	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));

	/* show the first record without removing from the fifo */
	ret = kfifo_out_peek(&test, buf, sizeof(buf));
	if (ret)
		printk(KERN_INFO "%.*s\n", ret, buf);

	/* check the correctness of all values in the fifo */
	i = 0;
	while (!kfifo_is_empty(&test)) {
		ret = kfifo_out(&test, buf, sizeof(buf));
		buf[ret] = '\0';
		printk(KERN_INFO "item = %.*s\n", ret, buf);
		if (strcmp(buf, expected_result[i++])) {
			printk(KERN_WARNING "value mismatch: test failed\n");
			return -EIO;
		}
	}
	if (i != ARRAY_SIZE(expected_result)) {
		printk(KERN_WARNING "size mismatch: test failed\n");
		return -EIO;
	}
	printk(KERN_INFO "test passed\n");

	return 0;
}

/* procfs write: append one record from user space, under write_access */
static ssize_t fifo_write(struct file *file, const char __user *buf,
						size_t count, loff_t *ppos)
{
	int ret;
	unsigned int copied;

	if (mutex_lock_interruptible(&write_access))
		return -ERESTARTSYS;

	ret = kfifo_from_user(&test, buf, count, &copied);

	mutex_unlock(&write_access);
	if (ret)
		return ret;

	return copied;
}

/* procfs read: pop records to user space, under read_access */
static ssize_t fifo_read(struct file *file, char __user *buf,
						size_t count, loff_t *ppos)
{
	int ret;
	unsigned int copied;

	if (mutex_lock_interruptible(&read_access))
		return -ERESTARTSYS;

	ret = kfifo_to_user(&test, buf, count, &copied);

	mutex_unlock(&read_access);
	if (ret)
		return ret;

	return copied;
}

static const struct proc_ops fifo_proc_ops = {
	.proc_read	= fifo_read,
	.proc_write	= fifo_write,
	.proc_lseek	= noop_llseek,
};

/* run the self-test, then expose the fifo through /proc/record-fifo */
static int __init example_init(void)
{
#ifdef DYNAMIC
	int ret;

	ret = kfifo_alloc(&test, FIFO_SIZE, GFP_KERNEL);
	if (ret) {
		printk(KERN_ERR "error kfifo_alloc\n");
		return ret;
	}
#else
	INIT_KFIFO(test);
#endif
	if (testfunc() < 0) {
#ifdef DYNAMIC
		kfifo_free(&test);
#endif
		return -EIO;
	}

	if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
#ifdef DYNAMIC
		kfifo_free(&test);
#endif
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry(PROC_FIFO, NULL);
#ifdef DYNAMIC
	kfifo_free(&test);
#endif
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefani Seibold <[email protected]>");
linux-master
samples/kfifo/record-example.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Sample kfifo byte stream implementation
 *
 * Copyright (C) 2010 Stefani Seibold <[email protected]>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/kfifo.h>

/*
 * This module shows how to create a byte stream fifo.
 */

/* fifo size in elements (bytes) */
#define FIFO_SIZE	32

/* name of the proc entry */
#define	PROC_FIFO	"bytestream-fifo"

/* lock for procfs read access */
static DEFINE_MUTEX(read_access);

/* lock for procfs write access */
static DEFINE_MUTEX(write_access);

/*
 * define DYNAMIC in this example for a dynamically allocated fifo.
 *
 * Otherwise the fifo storage will be a part of the fifo structure.
 */
#if 0
#define DYNAMIC
#endif

#ifdef DYNAMIC
static struct kfifo test;
#else
static DECLARE_KFIFO(test, unsigned char, FIFO_SIZE);
#endif

/*
 * Fifo content testfunc() expects after its sequence of operations:
 * "hello" (5 bytes) and 0..9 go in; the 5 "hello" bytes and 0,1 are
 * taken out; 0,1 are re-appended; one element (the 2) is skipped; and
 * 20.. fills the remaining space. That leaves 3..9, 0, 1, 20..42.
 */
static const unsigned char expected_result[FIFO_SIZE] = {
	 3,  4,  5,  6,  7,  8,  9,  0,
	 1, 20, 21, 22, 23, 24, 25, 26,
	27, 28, 29, 30, 31, 32, 33, 34,
	35, 36, 37, 38, 39, 40, 41, 42,
};

/*
 * Self-test: exercise kfifo_in/out/put/get/skip/peek on a byte fifo
 * and compare the final content against expected_result.
 * Returns 0 on success, -EIO on any mismatch.
 */
static int __init testfunc(void)
{
	unsigned char	buf[6];
	unsigned char	i, j;
	unsigned int	ret;

	printk(KERN_INFO "byte stream fifo test start\n");

	/* put string into the fifo */
	kfifo_in(&test, "hello", 5);

	/* put values into the fifo */
	for (i = 0; i != 10; i++)
		kfifo_put(&test, i);

	/* show the number of used elements */
	printk(KERN_INFO "fifo len: %u\n", kfifo_len(&test));

	/* get max of 5 bytes from the fifo */
	i = kfifo_out(&test, buf, 5);
	printk(KERN_INFO "buf: %.*s\n", i, buf);

	/* get max of 2 elements from the fifo */
	ret = kfifo_out(&test, buf, 2);
	printk(KERN_INFO "ret: %d\n", ret);
	/* and put it back to the end of the fifo */
	ret = kfifo_in(&test, buf, ret);
	printk(KERN_INFO "ret: %d\n", ret);

	/* skip first element of the fifo */
	printk(KERN_INFO "skip 1st element\n");
	kfifo_skip(&test);

	/* put values into the fifo until is full */
	for (i = 20; kfifo_put(&test, i); i++)
		;

	printk(KERN_INFO "queue len: %u\n", kfifo_len(&test));

	/* show the first value without removing from the fifo */
	if (kfifo_peek(&test, &i))
		printk(KERN_INFO "%d\n", i);

	/* check the correctness of all values in the fifo */
	j = 0;
	while (kfifo_get(&test, &i)) {
		printk(KERN_INFO "item = %d\n", i);
		if (i != expected_result[j++]) {
			printk(KERN_WARNING "value mismatch: test failed\n");
			return -EIO;
		}
	}
	if (j != ARRAY_SIZE(expected_result)) {
		printk(KERN_WARNING "size mismatch: test failed\n");
		return -EIO;
	}
	printk(KERN_INFO "test passed\n");

	return 0;
}

/* procfs write: copy user data into the byte fifo */
static ssize_t fifo_write(struct file *file, const char __user *buf,
						size_t count, loff_t *ppos)
{
	int ret;
	unsigned int copied;

	if (mutex_lock_interruptible(&write_access))
		return -ERESTARTSYS;

	ret = kfifo_from_user(&test, buf, count, &copied);

	mutex_unlock(&write_access);
	if (ret)
		return ret;

	return copied;
}

/* procfs read: drain bytes from the fifo to user space */
static ssize_t fifo_read(struct file *file, char __user *buf,
						size_t count, loff_t *ppos)
{
	int ret;
	unsigned int copied;

	if (mutex_lock_interruptible(&read_access))
		return -ERESTARTSYS;

	ret = kfifo_to_user(&test, buf, count, &copied);

	mutex_unlock(&read_access);
	if (ret)
		return ret;

	return copied;
}

static const struct proc_ops fifo_proc_ops = {
	.proc_read	= fifo_read,
	.proc_write	= fifo_write,
	.proc_lseek	= noop_llseek,
};

/* allocate/init the fifo, run the self-test, then expose it via procfs */
static int __init example_init(void)
{
#ifdef DYNAMIC
	int ret;

	ret = kfifo_alloc(&test, FIFO_SIZE, GFP_KERNEL);
	if (ret) {
		printk(KERN_ERR "error kfifo_alloc\n");
		return ret;
	}
#else
	INIT_KFIFO(test);
#endif
	if (testfunc() < 0) {
#ifdef DYNAMIC
		kfifo_free(&test);
#endif
		return -EIO;
	}

	if (proc_create(PROC_FIFO, 0, NULL, &fifo_proc_ops) == NULL) {
#ifdef DYNAMIC
		kfifo_free(&test);
#endif
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry(PROC_FIFO, NULL);
#ifdef DYNAMIC
	kfifo_free(&test);
#endif
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefani Seibold <[email protected]>");
linux-master
samples/kfifo/bytestream-example.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/module.h> #include <linux/kthread.h> #include <linux/irq_work.h> /* Must not be static to force gcc to consider these non constant */ char *trace_printk_test_global_str = "This is a dynamic string that will use trace_puts\n"; char *trace_printk_test_global_str_irq = "(irq) This is a dynamic string that will use trace_puts\n"; char *trace_printk_test_global_str_fmt = "%sThis is a %s that will use trace_printk\n"; static struct irq_work irqwork; static void trace_printk_irq_work(struct irq_work *work) { trace_printk("(irq) This is a static string that will use trace_bputs\n"); trace_printk(trace_printk_test_global_str_irq); trace_printk("(irq) This is a %s that will use trace_bprintk()\n", "static string"); trace_printk(trace_printk_test_global_str_fmt, "(irq) ", "dynamic string"); } static int __init trace_printk_init(void) { init_irq_work(&irqwork, trace_printk_irq_work); trace_printk("This is a static string that will use trace_bputs\n"); trace_printk(trace_printk_test_global_str); /* Kick off printing in irq context */ irq_work_queue(&irqwork); irq_work_sync(&irqwork); trace_printk("This is a %s that will use trace_bprintk()\n", "static string"); trace_printk(trace_printk_test_global_str_fmt, "", "dynamic string"); return 0; } static void __exit trace_printk_exit(void) { } module_init(trace_printk_init); module_exit(trace_printk_exit); MODULE_AUTHOR("Steven Rostedt"); MODULE_DESCRIPTION("trace-printk"); MODULE_LICENSE("GPL");
linux-master
samples/trace_printk/trace-printk.c
// SPDX-License-Identifier: GPL-2.0 /* * A sample program to run a User VM on the ACRN hypervisor * * This sample runs in a Service VM, which is a privileged VM of ACRN. * CONFIG_ACRN_HSM need to be enabled in the Service VM. * * Guest VM code in guest16.s will be executed after the VM launched. * * Copyright (C) 2020 Intel Corporation. All rights reserved. */ #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <fcntl.h> #include <unistd.h> #include <signal.h> #include <sys/ioctl.h> #include <linux/acrn.h> #define GUEST_MEMORY_SIZE (1024*1024) void *guest_memory; extern const unsigned char guest16[], guest16_end[]; static char io_request_page[4096] __attribute__((aligned(4096))); static struct acrn_io_request *io_req_buf = (struct acrn_io_request *)io_request_page; __u16 vcpu_num; __u16 vmid; int hsm_fd; int is_running = 1; void vm_exit(int sig) { sig = sig; is_running = 0; ioctl(hsm_fd, ACRN_IOCTL_PAUSE_VM, vmid); ioctl(hsm_fd, ACRN_IOCTL_DESTROY_IOREQ_CLIENT, 0); } int main(int argc, char **argv) { int vcpu_id, ret; struct acrn_vm_creation create_vm = {0}; struct acrn_vm_memmap ram_map = {0}; struct acrn_vcpu_regs regs; struct acrn_io_request *io_req; struct acrn_ioreq_notify __attribute__((aligned(8))) notify; argc = argc; argv = argv; ret = posix_memalign(&guest_memory, 4096, GUEST_MEMORY_SIZE); if (ret < 0) { printf("No enough memory!\n"); return -1; } hsm_fd = open("/dev/acrn_hsm", O_RDWR|O_CLOEXEC); create_vm.ioreq_buf = (__u64)io_req_buf; ret = ioctl(hsm_fd, ACRN_IOCTL_CREATE_VM, &create_vm); printf("Created VM! [%d]\n", ret); vcpu_num = create_vm.vcpu_num; vmid = create_vm.vmid; /* setup guest memory */ ram_map.type = ACRN_MEMMAP_RAM; ram_map.vma_base = (__u64)guest_memory; ram_map.len = GUEST_MEMORY_SIZE; ram_map.user_vm_pa = 0; ram_map.attr = ACRN_MEM_ACCESS_RWX; ret = ioctl(hsm_fd, ACRN_IOCTL_SET_MEMSEG, &ram_map); printf("Set up VM memory! 
[%d]\n", ret); memcpy(guest_memory, guest16, guest16_end-guest16); /* setup vcpu registers */ memset(&regs, 0, sizeof(regs)); regs.vcpu_id = 0; regs.vcpu_regs.rip = 0; /* CR0_ET | CR0_NE */ regs.vcpu_regs.cr0 = 0x30U; regs.vcpu_regs.cs_ar = 0x009FU; regs.vcpu_regs.cs_sel = 0xF000U; regs.vcpu_regs.cs_limit = 0xFFFFU; regs.vcpu_regs.cs_base = 0 & 0xFFFF0000UL; regs.vcpu_regs.rip = 0 & 0xFFFFUL; ret = ioctl(hsm_fd, ACRN_IOCTL_SET_VCPU_REGS, &regs); printf("Set up VM BSP registers! [%d]\n", ret); /* create an ioreq client for this VM */ ret = ioctl(hsm_fd, ACRN_IOCTL_CREATE_IOREQ_CLIENT, 0); printf("Created IO request client! [%d]\n", ret); /* run vm */ ret = ioctl(hsm_fd, ACRN_IOCTL_START_VM, vmid); printf("Start VM! [%d]\n", ret); signal(SIGINT, vm_exit); while (is_running) { ret = ioctl(hsm_fd, ACRN_IOCTL_ATTACH_IOREQ_CLIENT, 0); for (vcpu_id = 0; vcpu_id < vcpu_num; vcpu_id++) { io_req = &io_req_buf[vcpu_id]; if ((__sync_add_and_fetch(&io_req->processed, 0) == ACRN_IOREQ_STATE_PROCESSING) && (!io_req->kernel_handled)) if (io_req->type == ACRN_IOREQ_TYPE_PORTIO) { int bytes, port, in; port = io_req->reqs.pio_request.address; bytes = io_req->reqs.pio_request.size; in = (io_req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ); printf("Guest VM %s PIO[%x] with size[%x]\n", in ? "read" : "write", port, bytes); notify.vmid = vmid; notify.vcpu = vcpu_id; ioctl(hsm_fd, ACRN_IOCTL_NOTIFY_REQUEST_FINISH, &notify); } } } ret = ioctl(hsm_fd, ACRN_IOCTL_DESTROY_VM, NULL); printf("Destroy VM! [%d]\n", ret); close(hsm_fd); free(guest_memory); return 0; }
linux-master
samples/acrn/vm-sample.c
// SPDX-License-Identifier: GPL-2.0 /* * Sample kobject implementation * * Copyright (C) 2004-2007 Greg Kroah-Hartman <[email protected]> * Copyright (C) 2007 Novell Inc. */ #include <linux/kobject.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/module.h> #include <linux/init.h> /* * This module shows how to create a simple subdirectory in sysfs called * /sys/kernel/kobject-example In that directory, 3 files are created: * "foo", "baz", and "bar". If an integer is written to these files, it can be * later read out of it. */ static int foo; static int baz; static int bar; /* * The "foo" file where a static variable is read from and written to. */ static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", foo); } static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret; ret = kstrtoint(buf, 10, &foo); if (ret < 0) return ret; return count; } /* Sysfs attributes cannot be world-writable. */ static struct kobj_attribute foo_attribute = __ATTR(foo, 0664, foo_show, foo_store); /* * More complex function where we determine which variable is being accessed by * looking at the attribute for the "baz" and "bar" files. */ static ssize_t b_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { int var; if (strcmp(attr->attr.name, "baz") == 0) var = baz; else var = bar; return sysfs_emit(buf, "%d\n", var); } static ssize_t b_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int var, ret; ret = kstrtoint(buf, 10, &var); if (ret < 0) return ret; if (strcmp(attr->attr.name, "baz") == 0) baz = var; else bar = var; return count; } static struct kobj_attribute baz_attribute = __ATTR(baz, 0664, b_show, b_store); static struct kobj_attribute bar_attribute = __ATTR(bar, 0664, b_show, b_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *attrs[] = { &foo_attribute.attr, &baz_attribute.attr, &bar_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; /* * An unnamed attribute group will put all of the attributes directly in * the kobject directory. If we specify a name, a subdirectory will be * created for the attributes with the directory being the name of the * attribute group. */ static struct attribute_group attr_group = { .attrs = attrs, }; static struct kobject *example_kobj; static int __init example_init(void) { int retval; /* * Create a simple kobject with the name of "kobject_example", * located under /sys/kernel/ * * As this is a simple directory, no uevent will be sent to * userspace. That is why this function should not be used for * any type of dynamic kobjects, where the name and number are * not known ahead of time. */ example_kobj = kobject_create_and_add("kobject_example", kernel_kobj); if (!example_kobj) return -ENOMEM; /* Create the files associated with this kobject */ retval = sysfs_create_group(example_kobj, &attr_group); if (retval) kobject_put(example_kobj); return retval; } static void __exit example_exit(void) { kobject_put(example_kobj); } module_init(example_init); module_exit(example_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Greg Kroah-Hartman <[email protected]>");
linux-master
samples/kobject/kobject-example.c
// SPDX-License-Identifier: GPL-2.0 /* * Sample kset and ktype implementation * * Copyright (C) 2004-2007 Greg Kroah-Hartman <[email protected]> * Copyright (C) 2007 Novell Inc. */ #include <linux/kobject.h> #include <linux/string.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> /* * This module shows how to create a kset in sysfs called * /sys/kernel/kset-example * Then tree kobjects are created and assigned to this kset, "foo", "baz", * and "bar". In those kobjects, attributes of the same name are also * created and if an integer is written to these files, it can be later * read out of it. */ /* * This is our "object" that we will create a few of and register them with * sysfs. */ struct foo_obj { struct kobject kobj; int foo; int baz; int bar; }; #define to_foo_obj(x) container_of(x, struct foo_obj, kobj) /* a custom attribute that works just for a struct foo_obj. */ struct foo_attribute { struct attribute attr; ssize_t (*show)(struct foo_obj *foo, struct foo_attribute *attr, char *buf); ssize_t (*store)(struct foo_obj *foo, struct foo_attribute *attr, const char *buf, size_t count); }; #define to_foo_attr(x) container_of(x, struct foo_attribute, attr) /* * The default show function that must be passed to sysfs. This will be * called by sysfs for whenever a show function is called by the user on a * sysfs file associated with the kobjects we have registered. We need to * transpose back from a "default" kobject to our custom struct foo_obj and * then call the show function for that specific object. 
*/ static ssize_t foo_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct foo_attribute *attribute; struct foo_obj *foo; attribute = to_foo_attr(attr); foo = to_foo_obj(kobj); if (!attribute->show) return -EIO; return attribute->show(foo, attribute, buf); } /* * Just like the default show function above, but this one is for when the * sysfs "store" is requested (when a value is written to a file.) */ static ssize_t foo_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct foo_attribute *attribute; struct foo_obj *foo; attribute = to_foo_attr(attr); foo = to_foo_obj(kobj); if (!attribute->store) return -EIO; return attribute->store(foo, attribute, buf, len); } /* Our custom sysfs_ops that we will associate with our ktype later on */ static const struct sysfs_ops foo_sysfs_ops = { .show = foo_attr_show, .store = foo_attr_store, }; /* * The release function for our object. This is REQUIRED by the kernel to * have. We free the memory held in our object here. * * NEVER try to get away with just a "blank" release function to try to be * smarter than the kernel. Turns out, no one ever is... */ static void foo_release(struct kobject *kobj) { struct foo_obj *foo; foo = to_foo_obj(kobj); kfree(foo); } /* * The "foo" file where the .foo variable is read from and written to. */ static ssize_t foo_show(struct foo_obj *foo_obj, struct foo_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", foo_obj->foo); } static ssize_t foo_store(struct foo_obj *foo_obj, struct foo_attribute *attr, const char *buf, size_t count) { int ret; ret = kstrtoint(buf, 10, &foo_obj->foo); if (ret < 0) return ret; return count; } /* Sysfs attributes cannot be world-writable. */ static struct foo_attribute foo_attribute = __ATTR(foo, 0664, foo_show, foo_store); /* * More complex function where we determine which variable is being accessed by * looking at the attribute for the "baz" and "bar" files. 
*/ static ssize_t b_show(struct foo_obj *foo_obj, struct foo_attribute *attr, char *buf) { int var; if (strcmp(attr->attr.name, "baz") == 0) var = foo_obj->baz; else var = foo_obj->bar; return sysfs_emit(buf, "%d\n", var); } static ssize_t b_store(struct foo_obj *foo_obj, struct foo_attribute *attr, const char *buf, size_t count) { int var, ret; ret = kstrtoint(buf, 10, &var); if (ret < 0) return ret; if (strcmp(attr->attr.name, "baz") == 0) foo_obj->baz = var; else foo_obj->bar = var; return count; } static struct foo_attribute baz_attribute = __ATTR(baz, 0664, b_show, b_store); static struct foo_attribute bar_attribute = __ATTR(bar, 0664, b_show, b_store); /* * Create a group of attributes so that we can create and destroy them all * at once. */ static struct attribute *foo_default_attrs[] = { &foo_attribute.attr, &baz_attribute.attr, &bar_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; ATTRIBUTE_GROUPS(foo_default); /* * Our own ktype for our kobjects. Here we specify our sysfs ops, the * release function, and the set of default attributes we want created * whenever a kobject of this type is registered with the kernel. */ static const struct kobj_type foo_ktype = { .sysfs_ops = &foo_sysfs_ops, .release = foo_release, .default_groups = foo_default_groups, }; static struct kset *example_kset; static struct foo_obj *foo_obj; static struct foo_obj *bar_obj; static struct foo_obj *baz_obj; static struct foo_obj *create_foo_obj(const char *name) { struct foo_obj *foo; int retval; /* allocate the memory for the whole object */ foo = kzalloc(sizeof(*foo), GFP_KERNEL); if (!foo) return NULL; /* * As we have a kset for this kobject, we need to set it before calling * the kobject core. */ foo->kobj.kset = example_kset; /* * Initialize and add the kobject to the kernel. All the default files * will be created here. 
As we have already specified a kset for this * kobject, we don't have to set a parent for the kobject, the kobject * will be placed beneath that kset automatically. */ retval = kobject_init_and_add(&foo->kobj, &foo_ktype, NULL, "%s", name); if (retval) { kobject_put(&foo->kobj); return NULL; } /* * We are always responsible for sending the uevent that the kobject * was added to the system. */ kobject_uevent(&foo->kobj, KOBJ_ADD); return foo; } static void destroy_foo_obj(struct foo_obj *foo) { kobject_put(&foo->kobj); } static int __init example_init(void) { /* * Create a kset with the name of "kset_example", * located under /sys/kernel/ */ example_kset = kset_create_and_add("kset_example", NULL, kernel_kobj); if (!example_kset) return -ENOMEM; /* * Create three objects and register them with our kset */ foo_obj = create_foo_obj("foo"); if (!foo_obj) goto foo_error; bar_obj = create_foo_obj("bar"); if (!bar_obj) goto bar_error; baz_obj = create_foo_obj("baz"); if (!baz_obj) goto baz_error; return 0; baz_error: destroy_foo_obj(bar_obj); bar_error: destroy_foo_obj(foo_obj); foo_error: kset_unregister(example_kset); return -EINVAL; } static void __exit example_exit(void) { destroy_foo_obj(baz_obj); destroy_foo_obj(bar_obj); destroy_foo_obj(foo_obj); kset_unregister(example_kset); } module_init(example_init); module_exit(example_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Greg Kroah-Hartman <[email protected]>");
linux-master
samples/kobject/kset-example.c
// SPDX-License-Identifier: GPL-2.0 /* * TPS6594 PFSM userspace example * * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/ * * This example shows how to use PFSMs from a userspace application, * on TI j721s2 platform. The PMIC is armed to be triggered by a RTC * alarm to execute state transition (RETENTION to ACTIVE). */ #include <fcntl.h> #include <stdio.h> #include <sys/ioctl.h> #include <unistd.h> #include <linux/rtc.h> #include <linux/tps6594_pfsm.h> #define ALARM_DELTA_SEC 30 #define RTC_A "/dev/rtc0" #define PMIC_NB 3 #define PMIC_A "/dev/pfsm-0-0x48" #define PMIC_B "/dev/pfsm-0-0x4c" #define PMIC_C "/dev/pfsm-2-0x58" static const char * const dev_pfsm[] = {PMIC_A, PMIC_B, PMIC_C}; int main(int argc, char *argv[]) { int i, ret, fd_rtc, fd_pfsm[PMIC_NB] = { 0 }; struct rtc_time rtc_tm; struct pmic_state_opt pmic_opt = { 0 }; unsigned long data; fd_rtc = open(RTC_A, O_RDONLY); if (fd_rtc < 0) { perror("Failed to open RTC device."); goto out; } for (i = 0 ; i < PMIC_NB ; i++) { fd_pfsm[i] = open(dev_pfsm[i], O_RDWR); if (fd_pfsm[i] < 0) { perror("Failed to open PFSM device."); goto out; } } /* Read RTC date/time */ ret = ioctl(fd_rtc, RTC_RD_TIME, &rtc_tm); if (ret < 0) { perror("Failed to read RTC date/time."); goto out; } printf("Current RTC date/time is %d-%d-%d, %02d:%02d:%02d.\n", rtc_tm.tm_mday, rtc_tm.tm_mon + 1, rtc_tm.tm_year + 1900, rtc_tm.tm_hour, rtc_tm.tm_min, rtc_tm.tm_sec); /* Set RTC alarm to ALARM_DELTA_SEC sec in the future, and check for rollover */ rtc_tm.tm_sec += ALARM_DELTA_SEC; if (rtc_tm.tm_sec >= 60) { rtc_tm.tm_sec %= 60; rtc_tm.tm_min++; } if (rtc_tm.tm_min == 60) { rtc_tm.tm_min = 0; rtc_tm.tm_hour++; } if (rtc_tm.tm_hour == 24) rtc_tm.tm_hour = 0; ret = ioctl(fd_rtc, RTC_ALM_SET, &rtc_tm); if (ret < 0) { perror("Failed to set RTC alarm."); goto out; } /* Enable alarm interrupts */ ret = ioctl(fd_rtc, RTC_AIE_ON, 0); if (ret < 0) { perror("Failed to enable alarm interrupts."); goto out; } printf("Waiting %d 
seconds for alarm...\n", ALARM_DELTA_SEC); /* * Set RETENTION state with options for PMIC_C/B/A respectively. * Since PMIC_A is master, it should be the last one to be configured. */ pmic_opt.ddr_retention = 1; for (i = PMIC_NB - 1 ; i >= 0 ; i--) { printf("Set RETENTION state for PMIC_%d.\n", i); sleep(1); ret = ioctl(fd_pfsm[i], PMIC_SET_RETENTION_STATE, &pmic_opt); if (ret < 0) { perror("Failed to set RETENTION state."); goto out_reset; } } /* This blocks until the alarm ring causes an interrupt */ ret = read(fd_rtc, &data, sizeof(unsigned long)); if (ret < 0) perror("Failed to get RTC alarm."); else puts("Alarm rang.\n"); out_reset: ioctl(fd_rtc, RTC_AIE_OFF, 0); /* Set ACTIVE state for PMIC_A */ ioctl(fd_pfsm[0], PMIC_SET_ACTIVE_STATE, 0); out: for (i = 0 ; i < PMIC_NB ; i++) if (fd_pfsm[i]) close(fd_pfsm[i]); if (fd_rtc) close(fd_rtc); return 0; }
linux-master
samples/pfsm/pfsm-wakeup.c
/* Extract X.509 certificate in DER form from PKCS#11 or PEM. * * Copyright © 2014-2015 Red Hat, Inc. All Rights Reserved. * Copyright © 2015 Intel Corporation. * * Authors: David Howells <[email protected]> * David Woodhouse <[email protected]> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the licence, or (at your option) any later version. */ #define _GNU_SOURCE #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <stdbool.h> #include <string.h> #include <err.h> #include <openssl/bio.h> #include <openssl/pem.h> #include <openssl/err.h> #include <openssl/engine.h> /* * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API. * * Remove this if/when that API is no longer used */ #pragma GCC diagnostic ignored "-Wdeprecated-declarations" #define PKEY_ID_PKCS7 2 static __attribute__((noreturn)) void format(void) { fprintf(stderr, "Usage: extract-cert <source> <dest>\n"); exit(2); } static void display_openssl_errors(int l) { const char *file; char buf[120]; int e, line; if (ERR_peek_error() == 0) return; fprintf(stderr, "At main.c:%d:\n", l); while ((e = ERR_get_error_line(&file, &line))) { ERR_error_string(e, buf); fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line); } } static void drain_openssl_errors(void) { const char *file; int line; if (ERR_peek_error() == 0) return; while (ERR_get_error_line(&file, &line)) {} } #define ERR(cond, fmt, ...) 
\ do { \ bool __cond = (cond); \ display_openssl_errors(__LINE__); \ if (__cond) { \ err(1, fmt, ## __VA_ARGS__); \ } \ } while(0) static const char *key_pass; static BIO *wb; static char *cert_dst; static bool verbose; static void write_cert(X509 *x509) { char buf[200]; if (!wb) { wb = BIO_new_file(cert_dst, "wb"); ERR(!wb, "%s", cert_dst); } X509_NAME_oneline(X509_get_subject_name(x509), buf, sizeof(buf)); ERR(!i2d_X509_bio(wb, x509), "%s", cert_dst); if (verbose) fprintf(stderr, "Extracted cert: %s\n", buf); } int main(int argc, char **argv) { char *cert_src; char *verbose_env; OpenSSL_add_all_algorithms(); ERR_load_crypto_strings(); ERR_clear_error(); verbose_env = getenv("KBUILD_VERBOSE"); if (verbose_env && strchr(verbose_env, '1')) verbose = true; key_pass = getenv("KBUILD_SIGN_PIN"); if (argc != 3) format(); cert_src = argv[1]; cert_dst = argv[2]; if (!cert_src[0]) { /* Invoked with no input; create empty file */ FILE *f = fopen(cert_dst, "wb"); ERR(!f, "%s", cert_dst); fclose(f); exit(0); } else if (!strncmp(cert_src, "pkcs11:", 7)) { ENGINE *e; struct { const char *cert_id; X509 *cert; } parms; parms.cert_id = cert_src; parms.cert = NULL; ENGINE_load_builtin_engines(); drain_openssl_errors(); e = ENGINE_by_id("pkcs11"); ERR(!e, "Load PKCS#11 ENGINE"); if (ENGINE_init(e)) drain_openssl_errors(); else ERR(1, "ENGINE_init"); if (key_pass) ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN"); ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1); ERR(!parms.cert, "Get X.509 from PKCS#11"); write_cert(parms.cert); } else { BIO *b; X509 *x509; b = BIO_new_file(cert_src, "rb"); ERR(!b, "%s", cert_src); while (1) { x509 = PEM_read_bio_X509(b, NULL, NULL, NULL); if (wb && !x509) { unsigned long err = ERR_peek_last_error(); if (ERR_GET_LIB(err) == ERR_LIB_PEM && ERR_GET_REASON(err) == PEM_R_NO_START_LINE) { ERR_clear_error(); break; } } ERR(!x509, "%s", cert_src); write_cert(x509); } } BIO_free(wb); return 0; }
linux-master
certs/extract-cert.c
// SPDX-License-Identifier: GPL-2.0-or-later /* System hash blacklist. * * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #define pr_fmt(fmt) "blacklist: "fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/key.h> #include <linux/key-type.h> #include <linux/sched.h> #include <linux/ctype.h> #include <linux/err.h> #include <linux/seq_file.h> #include <linux/uidgid.h> #include <keys/asymmetric-type.h> #include <keys/system_keyring.h> #include "blacklist.h" /* * According to crypto/asymmetric_keys/x509_cert_parser.c:x509_note_pkey_algo(), * the size of the currently longest supported hash algorithm is 512 bits, * which translates into 128 hex characters. */ #define MAX_HASH_LEN 128 #define BLACKLIST_KEY_PERM (KEY_POS_SEARCH | KEY_POS_VIEW | \ KEY_USR_SEARCH | KEY_USR_VIEW) static const char tbs_prefix[] = "tbs"; static const char bin_prefix[] = "bin"; static struct key *blacklist_keyring; #ifdef CONFIG_SYSTEM_REVOCATION_LIST extern __initconst const u8 revocation_certificate_list[]; extern __initconst const unsigned long revocation_certificate_list_size; #endif /* * The description must be a type prefix, a colon and then an even number of * hex digits. The hash is kept in the description. */ static int blacklist_vet_description(const char *desc) { int i, prefix_len, tbs_step = 0, bin_step = 0; /* The following algorithm only works if prefix lengths match. 
	 */
	BUILD_BUG_ON(sizeof(tbs_prefix) != sizeof(bin_prefix));
	prefix_len = sizeof(tbs_prefix) - 1;
	/* Walk the prefix, counting how far desc matches each candidate. */
	for (i = 0; *desc; desc++, i++) {
		if (*desc == ':') {
			if (tbs_step == prefix_len)
				goto found_colon;
			if (bin_step == prefix_len)
				goto found_colon;
			return -EINVAL;
		}
		if (i >= prefix_len)
			return -EINVAL;
		if (*desc == tbs_prefix[i])
			tbs_step++;
		if (*desc == bin_prefix[i])
			bin_step++;
	}
	return -EINVAL;

found_colon:
	desc++;
	/* Only lowercase hexadecimal digits are accepted after the colon. */
	for (i = 0; *desc && i < MAX_HASH_LEN; desc++, i++) {
		if (!isxdigit(*desc) || isupper(*desc))
			return -EINVAL;
	}
	if (*desc)
		/* The hash is greater than MAX_HASH_LEN. */
		return -ENOPKG;

	/* Checks for an even number of hexadecimal characters. */
	if (i == 0 || i & 1)
		return -EINVAL;

	return 0;
}

static int blacklist_key_instantiate(struct key *key,
		struct key_preparsed_payload *prep)
{
#ifdef CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE
	int err;
#endif

	/* Sets safe default permissions for keys loaded by user space. */
	key->perm = BLACKLIST_KEY_PERM;

	/*
	 * Skips the authentication step for builtin hashes, they are not
	 * signed but still trusted.
	 */
	if (key->flags & (1 << KEY_FLAG_BUILTIN))
		goto out;

#ifdef CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE
	/*
	 * Verifies the description's PKCS#7 signature against the builtin
	 * trusted keyring.
	 */
	err = verify_pkcs7_signature(key->description,
				     strlen(key->description), prep->data,
				     prep->datalen, NULL,
				     VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
				     NULL);
	if (err)
		return err;
#else
	/*
	 * It should not be possible to come here because the keyring doesn't
	 * have KEY_USR_WRITE and the only other way to call this function is
	 * for builtin hashes.
	 */
	WARN_ON_ONCE(1);
	return -EPERM;
#endif

out:
	return generic_key_instantiate(key, prep);
}

/* Blacklist entries are immutable once created. */
static int blacklist_key_update(struct key *key,
		struct key_preparsed_payload *prep)
{
	return -EPERM;
}

/* The hash itself lives in the description, so just print that. */
static void blacklist_describe(const struct key *key, struct seq_file *m)
{
	seq_puts(m, key->description);
}

static struct key_type key_type_blacklist = {
	.name			= "blacklist",
	.vet_description	= blacklist_vet_description,
	.instantiate		= blacklist_key_instantiate,
	.update			= blacklist_key_update,
	.describe		= blacklist_describe,
};

/*
 * Build a "<prefix>:<lowercase-hex>" description string from a binary hash.
 * Returns a kmalloc'd buffer (caller frees) or an ERR_PTR.
 */
static char *get_raw_hash(const u8 *hash, size_t hash_len,
		enum blacklist_hash_type hash_type)
{
	size_t type_len;
	const char *type_prefix;
	char *buffer, *p;

	switch (hash_type) {
	case BLACKLIST_HASH_X509_TBS:
		type_len = sizeof(tbs_prefix) - 1;
		type_prefix = tbs_prefix;
		break;
	case BLACKLIST_HASH_BINARY:
		type_len = sizeof(bin_prefix) - 1;
		type_prefix = bin_prefix;
		break;
	default:
		WARN_ON_ONCE(1);
		return ERR_PTR(-EINVAL);
	}
	/* prefix + ':' + two hex chars per byte + NUL */
	buffer = kmalloc(type_len + 1 + hash_len * 2 + 1, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	p = memcpy(buffer, type_prefix, type_len);
	p += type_len;
	*p++ = ':';
	bin2hex(p, hash, hash_len);
	p += hash_len * 2;
	*p = '\0';
	return buffer;
}

/**
 * mark_raw_hash_blacklisted - Add a hash to the system blacklist
 * @hash: The hash as a hex string with a type prefix (eg.
 "tbs:23aa429783")
 */
static int mark_raw_hash_blacklisted(const char *hash)
{
	key_ref_t key;

	key = key_create(make_key_ref(blacklist_keyring, true),
			 "blacklist", hash,
			 NULL, 0,
			 BLACKLIST_KEY_PERM,
			 KEY_ALLOC_NOT_IN_QUOTA |
			 KEY_ALLOC_BUILT_IN);
	if (IS_ERR(key)) {
		/* A duplicate is only worth a warning, not an error splat. */
		if (PTR_ERR(key) == -EEXIST)
			pr_warn("Duplicate blacklisted hash %s\n", hash);
		else
			pr_err("Problem blacklisting hash %s: %pe\n", hash, key);
		return PTR_ERR(key);
	}
	return 0;
}

/* Binary-hash front end: format the hash, then blacklist it. */
int mark_hash_blacklisted(const u8 *hash, size_t hash_len,
		enum blacklist_hash_type hash_type)
{
	const char *buffer;
	int err;

	buffer = get_raw_hash(hash, hash_len, hash_type);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	err = mark_raw_hash_blacklisted(buffer);
	kfree(buffer);
	return err;
}

/**
 * is_hash_blacklisted - Determine if a hash is blacklisted
 * @hash: The hash to be checked as a binary blob
 * @hash_len: The length of the binary hash
 * @hash_type: Type of hash
 *
 * Returns 0 if not found, -EKEYREJECTED if blacklisted, or a negative errno
 * on formatting failure.
 */
int is_hash_blacklisted(const u8 *hash, size_t hash_len,
			enum blacklist_hash_type hash_type)
{
	key_ref_t kref;
	const char *buffer;
	int ret = 0;

	buffer = get_raw_hash(hash, hash_len, hash_type);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	kref = keyring_search(make_key_ref(blacklist_keyring, true),
			      &key_type_blacklist, buffer, false);
	if (!IS_ERR(kref)) {
		key_ref_put(kref);
		ret = -EKEYREJECTED;
	}

	kfree(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(is_hash_blacklisted);

/* Convenience wrapper: -EPERM if the binary blob's hash is blacklisted. */
int is_binary_blacklisted(const u8 *hash, size_t hash_len)
{
	if (is_hash_blacklisted(hash, hash_len, BLACKLIST_HASH_BINARY) ==
	    -EKEYREJECTED)
		return -EPERM;

	return 0;
}
EXPORT_SYMBOL_GPL(is_binary_blacklisted);

#ifdef CONFIG_SYSTEM_REVOCATION_LIST
/**
 * add_key_to_revocation_list - Add a revocation certificate to the blacklist
 * @data: The data blob containing the certificate
 * @size: The size of data blob
 */
int add_key_to_revocation_list(const char *data, size_t size)
{
	key_ref_t key;

	key = key_create_or_update(make_key_ref(blacklist_keyring, true),
				   "asymmetric",
				   NULL,
				   data,
				   size,
				   KEY_POS_VIEW | KEY_POS_READ
				   | KEY_POS_SEARCH | KEY_USR_VIEW,
				   KEY_ALLOC_NOT_IN_QUOTA |
				   KEY_ALLOC_BUILT_IN |
				   KEY_ALLOC_BYPASS_RESTRICTION);

	if (IS_ERR(key)) {
		pr_err("Problem with revocation key (%ld)\n", PTR_ERR(key));
		return PTR_ERR(key);
	}

	return 0;
}

/**
 * is_key_on_revocation_list - Determine if the key for a PKCS#7 message is revoked
 * @pkcs7: The PKCS#7 message to check
 *
 * Returns -EKEYREJECTED if the signing key is on the revocation list,
 * -ENOKEY otherwise.
 */
int is_key_on_revocation_list(struct pkcs7_message *pkcs7)
{
	int ret;

	ret = pkcs7_validate_trust(pkcs7, blacklist_keyring);

	if (ret == 0)
		return -EKEYREJECTED;

	return -ENOKEY;
}
#endif

/* Only keys of the blacklist type may be linked into the keyring. */
static int restrict_link_for_blacklist(struct key *dest_keyring,
		const struct key_type *type, const union key_payload *payload,
		struct key *restrict_key)
{
	if (type == &key_type_blacklist)
		return 0;
	return -EOPNOTSUPP;
}

/*
 * Initialise the blacklist
 *
 * The blacklist_init() function is registered as an initcall via
 * device_initcall().  As a result if the blacklist_init() function fails for
 * any reason the kernel continues to execute.  While cleanly returning -ENODEV
 * could be acceptable for some non-critical kernel parts, if the blacklist
 * keyring fails to load it defeats the certificate/key based deny list for
 * signed modules.  If a critical piece of security functionality that users
 * expect to be present fails to initialize, panic()ing is likely the right
 * thing to do.
 */
static int __init blacklist_init(void)
{
	const char *const *bl;
	struct key_restriction *restriction;

	if (register_key_type(&key_type_blacklist) < 0)
		panic("Can't allocate system blacklist key type\n");

	restriction = kzalloc(sizeof(*restriction), GFP_KERNEL);
	if (!restriction)
		panic("Can't allocate blacklist keyring restriction\n");
	restriction->check = restrict_link_for_blacklist;

	/* KEY_USR_WRITE is only granted when authenticated updates are on. */
	blacklist_keyring =
		keyring_alloc(".blacklist",
			      GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
			      KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
			      KEY_POS_WRITE |
			      KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH
#ifdef CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE
			      | KEY_USR_WRITE
#endif
			      , KEY_ALLOC_NOT_IN_QUOTA |
			      KEY_ALLOC_SET_KEEP,
			      restriction, NULL);
	if (IS_ERR(blacklist_keyring))
		panic("Can't allocate system blacklist keyring\n");

	/* Load the compiled-in hash list; failures are logged, not fatal. */
	for (bl = blacklist_hashes; *bl; bl++)
		if (mark_raw_hash_blacklisted(*bl) < 0)
			pr_err("- blacklisting failed\n");
	return 0;
}

/*
 * Must be initialised before we try and load the keys into the keyring.
 */
device_initcall(blacklist_init);

#ifdef CONFIG_SYSTEM_REVOCATION_LIST
/*
 * Load the compiled-in list of revocation X.509 certificates.
 */
static __init int load_revocation_certificate_list(void)
{
	if (revocation_certificate_list_size)
		pr_notice("Loading compiled-in revocation X.509 certificates\n");

	return x509_load_certificate_list(revocation_certificate_list,
					  revocation_certificate_list_size,
					  blacklist_keyring);
}
late_initcall(load_revocation_certificate_list);
#endif
linux-master
certs/blacklist.c
// SPDX-License-Identifier: GPL-2.0-or-later /* System trusted keyring for trusted public keys * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/cred.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/uidgid.h> #include <linux/verification.h> #include <keys/asymmetric-type.h> #include <keys/system_keyring.h> #include <crypto/pkcs7.h> static struct key *builtin_trusted_keys; #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING static struct key *secondary_trusted_keys; #endif #ifdef CONFIG_INTEGRITY_MACHINE_KEYRING static struct key *machine_trusted_keys; #endif #ifdef CONFIG_INTEGRITY_PLATFORM_KEYRING static struct key *platform_trusted_keys; #endif extern __initconst const u8 system_certificate_list[]; extern __initconst const unsigned long system_certificate_list_size; extern __initconst const unsigned long module_cert_size; /** * restrict_link_by_builtin_trusted - Restrict keyring addition by built-in CA * @dest_keyring: Keyring being linked to. * @type: The type of key being added. * @payload: The payload of the new key. * @restriction_key: A ring of keys that can be used to vouch for the new cert. * * Restrict the addition of keys into a keyring based on the key-to-be-added * being vouched for by a key in the built in system keyring. */ int restrict_link_by_builtin_trusted(struct key *dest_keyring, const struct key_type *type, const union key_payload *payload, struct key *restriction_key) { return restrict_link_by_signature(dest_keyring, type, payload, builtin_trusted_keys); } /** * restrict_link_by_digsig_builtin - Restrict digitalSignature key additions by the built-in keyring * @dest_keyring: Keyring being linked to. * @type: The type of key being added. * @payload: The payload of the new key. * @restriction_key: A ring of keys that can be used to vouch for the new cert. 
 *
 * Restrict the addition of keys into a keyring based on the key-to-be-added
 * being vouched for by a key in the built in system keyring.  The new key
 * must have the digitalSignature usage field set.
 */
int restrict_link_by_digsig_builtin(struct key *dest_keyring,
				    const struct key_type *type,
				    const union key_payload *payload,
				    struct key *restriction_key)
{
	return restrict_link_by_digsig(dest_keyring, type, payload,
				       builtin_trusted_keys);
}

#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
/**
 * restrict_link_by_builtin_and_secondary_trusted - Restrict keyring
 *   addition by both built-in and secondary keyrings.
 * @dest_keyring: Keyring being linked to.
 * @type: The type of key being added.
 * @payload: The payload of the new key.
 * @restrict_key: A ring of keys that can be used to vouch for the new cert.
 *
 * Restrict the addition of keys into a keyring based on the key-to-be-added
 * being vouched for by a key in either the built-in or the secondary system
 * keyrings.
 */
int restrict_link_by_builtin_and_secondary_trusted(
	struct key *dest_keyring,
	const struct key_type *type,
	const union key_payload *payload,
	struct key *restrict_key)
{
	/* If we have a secondary trusted keyring, then that contains a link
	 * through to the builtin keyring and the search will follow that link.
	 */
	if (type == &key_type_keyring &&
	    dest_keyring == secondary_trusted_keys &&
	    payload == &builtin_trusted_keys->payload)
		/* Allow the builtin keyring to be added to the secondary */
		return 0;

	return restrict_link_by_signature(dest_keyring, type, payload,
					  secondary_trusted_keys);
}

/**
 * restrict_link_by_digsig_builtin_and_secondary - Restrict by digitalSignature.
 * @dest_keyring: Keyring being linked to.
 * @type: The type of key being added.
 * @payload: The payload of the new key.
 * @restrict_key: A ring of keys that can be used to vouch for the new cert.
 *
 * Restrict the addition of keys into a keyring based on the key-to-be-added
 * being vouched for by a key in either the built-in or the secondary system
 * keyrings.  The new key must have the digitalSignature usage field set.
 */
int restrict_link_by_digsig_builtin_and_secondary(struct key *dest_keyring,
						  const struct key_type *type,
						  const union key_payload *payload,
						  struct key *restrict_key)
{
	/* If we have a secondary trusted keyring, then that contains a link
	 * through to the builtin keyring and the search will follow that link.
	 */
	if (type == &key_type_keyring &&
	    dest_keyring == secondary_trusted_keys &&
	    payload == &builtin_trusted_keys->payload)
		/* Allow the builtin keyring to be added to the secondary */
		return 0;

	return restrict_link_by_digsig(dest_keyring, type, payload,
				       secondary_trusted_keys);
}

/*
 * Allocate a struct key_restriction for the "builtin and secondary trust"
 * keyring.  Only for use in system_trusted_keyring_init().
 */
static __init struct key_restriction *get_builtin_and_secondary_restriction(void)
{
	struct key_restriction *restriction;

	restriction = kzalloc(sizeof(struct key_restriction), GFP_KERNEL);

	if (!restriction)
		panic("Can't allocate secondary trusted keyring restriction\n");

	/* With a machine keyring configured, accept its vouching too. */
	if (IS_ENABLED(CONFIG_INTEGRITY_MACHINE_KEYRING))
		restriction->check = restrict_link_by_builtin_secondary_and_machine;
	else
		restriction->check = restrict_link_by_builtin_and_secondary_trusted;

	return restriction;
}

/**
 * add_to_secondary_keyring - Add to secondary keyring.
 * @source: Source of key
 * @data: The blob holding the key
 * @len: The length of the data blob
 *
 * Add a key to the secondary keyring.  The key must be vouched for by a key in the builtin,
 * machine or secondary keyring itself.
 */
void __init add_to_secondary_keyring(const char *source, const void *data, size_t len)
{
	key_ref_t key;
	key_perm_t perm;

	perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW;

	key = key_create_or_update(make_key_ref(secondary_trusted_keys, 1),
				   "asymmetric",
				   NULL, data, len, perm,
				   KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(key)) {
		/* Best-effort: log and continue, don't fail the boot. */
		pr_err("Problem loading X.509 certificate from %s to secondary keyring %ld\n",
		       source, PTR_ERR(key));
		return;
	}

	pr_notice("Loaded X.509 cert '%s'\n", key_ref_to_ptr(key)->description);
	key_ref_put(key);
}
#endif

#ifdef CONFIG_INTEGRITY_MACHINE_KEYRING
void __init set_machine_trusted_keys(struct key *keyring)
{
	machine_trusted_keys = keyring;

	if (key_link(secondary_trusted_keys, machine_trusted_keys) < 0)
		panic("Can't link (machine) trusted keyrings\n");
}

/**
 * restrict_link_by_builtin_secondary_and_machine - Restrict keyring addition.
 * @dest_keyring: Keyring being linked to.
 * @type: The type of key being added.
 * @payload: The payload of the new key.
 * @restrict_key: A ring of keys that can be used to vouch for the new cert.
 *
 * Restrict the addition of keys into a keyring based on the key-to-be-added
 * being vouched for by a key in either the built-in, the secondary, or
 * the machine keyrings.
 */
int restrict_link_by_builtin_secondary_and_machine(
	struct key *dest_keyring,
	const struct key_type *type,
	const union key_payload *payload,
	struct key *restrict_key)
{
	if (machine_trusted_keys && type == &key_type_keyring &&
	    dest_keyring == secondary_trusted_keys &&
	    payload == &machine_trusted_keys->payload)
		/* Allow the machine keyring to be added to the secondary */
		return 0;

	return restrict_link_by_builtin_and_secondary_trusted(dest_keyring, type,
							      payload, restrict_key);
}
#endif

/*
 * Create the trusted keyrings
 */
static __init int system_trusted_keyring_init(void)
{
	pr_notice("Initialise system trusted keyrings\n");

	builtin_trusted_keys =
		keyring_alloc(".builtin_trusted_keys",
			      GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
			      ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
			      KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH),
			      KEY_ALLOC_NOT_IN_QUOTA,
			      NULL, NULL);
	if (IS_ERR(builtin_trusted_keys))
		panic("Can't allocate builtin trusted keyring\n");

#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
	/* Unlike the builtin ring, user space may add to the secondary. */
	secondary_trusted_keys =
		keyring_alloc(".secondary_trusted_keys",
			      GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
			      ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
			      KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH |
			      KEY_USR_WRITE),
			      KEY_ALLOC_NOT_IN_QUOTA,
			      get_builtin_and_secondary_restriction(),
			      NULL);
	if (IS_ERR(secondary_trusted_keys))
		panic("Can't allocate secondary trusted keyring\n");

	if (key_link(secondary_trusted_keys, builtin_trusted_keys) < 0)
		panic("Can't link trusted keyrings\n");
#endif

	return 0;
}

/*
 * Must be initialised before we try and load the keys into the keyring.
 */
device_initcall(system_trusted_keyring_init);

__init int load_module_cert(struct key *keyring)
{
	if (!IS_ENABLED(CONFIG_IMA_APPRAISE_MODSIG))
		return 0;

	pr_notice("Loading compiled-in module X.509 certificates\n");

	return x509_load_certificate_list(system_certificate_list,
					  module_cert_size, keyring);
}

/*
 * Load the compiled-in list of X.509 certificates.
 */
static __init int load_system_certificate_list(void)
{
	const u8 *p;
	unsigned long size;

	pr_notice("Loading compiled-in X.509 certificates\n");

	/*
	 * With CONFIG_MODULE_SIG the module-signing certs stay in the builtin
	 * keyring, so load the full list; otherwise skip past them.
	 */
#ifdef CONFIG_MODULE_SIG
	p = system_certificate_list;
	size = system_certificate_list_size;
#else
	p = system_certificate_list + module_cert_size;
	size = system_certificate_list_size - module_cert_size;
#endif

	return x509_load_certificate_list(p, size, builtin_trusted_keys);
}
late_initcall(load_system_certificate_list);

#ifdef CONFIG_SYSTEM_DATA_VERIFICATION

/**
 * verify_pkcs7_message_sig - Verify a PKCS#7-based signature on system data.
 * @data: The data to be verified (NULL if expecting internal data).
 * @len: Size of @data.
 * @pkcs7: The PKCS#7 message that is the signature.
 * @trusted_keys: Trusted keys to use (NULL for builtin trusted keys only,
 *					(void *)1UL for all trusted keys).
 * @usage: The use to which the key is being put.
 * @view_content: Callback to gain access to content.
 * @ctx: Context for callback.
 */
int verify_pkcs7_message_sig(const void *data, size_t len,
			     struct pkcs7_message *pkcs7,
			     struct key *trusted_keys,
			     enum key_being_used_for usage,
			     int (*view_content)(void *ctx,
						 const void *data, size_t len,
						 size_t asn1hdrlen),
			     void *ctx)
{
	int ret;

	/* The data should be detached - so we need to supply it.
	 */
	if (data && pkcs7_supply_detached_data(pkcs7, data, len) < 0) {
		pr_err("PKCS#7 signature with non-detached data\n");
		ret = -EBADMSG;
		goto error;
	}

	ret = pkcs7_verify(pkcs7, usage);
	if (ret < 0)
		goto error;

	/* -ENOKEY from the revocation check means "not revoked". */
	ret = is_key_on_revocation_list(pkcs7);
	if (ret != -ENOKEY) {
		pr_devel("PKCS#7 key is on revocation list\n");
		goto error;
	}

	/* Resolve the sentinel keyring selectors to concrete keyrings. */
	if (!trusted_keys) {
		trusted_keys = builtin_trusted_keys;
	} else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
		trusted_keys = secondary_trusted_keys;
#else
		trusted_keys = builtin_trusted_keys;
#endif
	} else if (trusted_keys == VERIFY_USE_PLATFORM_KEYRING) {
#ifdef CONFIG_INTEGRITY_PLATFORM_KEYRING
		trusted_keys = platform_trusted_keys;
#else
		trusted_keys = NULL;
#endif
		if (!trusted_keys) {
			ret = -ENOKEY;
			pr_devel("PKCS#7 platform keyring is not available\n");
			goto error;
		}
	}
	ret = pkcs7_validate_trust(pkcs7, trusted_keys);
	if (ret < 0) {
		if (ret == -ENOKEY)
			pr_devel("PKCS#7 signature not signed with a trusted key\n");
		goto error;
	}

	if (view_content) {
		size_t asn1hdrlen;

		ret = pkcs7_get_content_data(pkcs7, &data, &len, &asn1hdrlen);
		if (ret < 0) {
			if (ret == -ENODATA)
				pr_devel("PKCS#7 message does not contain data\n");
			goto error;
		}

		ret = view_content(ctx, data, len, asn1hdrlen);
	}

error:
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}

/**
 * verify_pkcs7_signature - Verify a PKCS#7-based signature on system data.
 * @data: The data to be verified (NULL if expecting internal data).
 * @len: Size of @data.
 * @raw_pkcs7: The PKCS#7 message that is the signature.
 * @pkcs7_len: The size of @raw_pkcs7.
 * @trusted_keys: Trusted keys to use (NULL for builtin trusted keys only,
 *					(void *)1UL for all trusted keys).
 * @usage: The use to which the key is being put.
 * @view_content: Callback to gain access to content.
 * @ctx: Context for callback.
 */
int verify_pkcs7_signature(const void *data, size_t len,
			   const void *raw_pkcs7, size_t pkcs7_len,
			   struct key *trusted_keys,
			   enum key_being_used_for usage,
			   int (*view_content)(void *ctx,
					       const void *data, size_t len,
					       size_t asn1hdrlen),
			   void *ctx)
{
	struct pkcs7_message *pkcs7;
	int ret;

	/* Parse the blob, then delegate to the message-based verifier. */
	pkcs7 = pkcs7_parse_message(raw_pkcs7, pkcs7_len);
	if (IS_ERR(pkcs7))
		return PTR_ERR(pkcs7);

	ret = verify_pkcs7_message_sig(data, len, pkcs7, trusted_keys, usage,
				       view_content, ctx);

	pkcs7_free_message(pkcs7);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(verify_pkcs7_signature);

#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */

#ifdef CONFIG_INTEGRITY_PLATFORM_KEYRING
void __init set_platform_trusted_keys(struct key *keyring)
{
	platform_trusted_keys = keyring;
}
#endif
linux-master
certs/system_keyring.c
// SPDX-License-Identifier: GPL-2.0 #include "blacklist.h" const char __initconst *const blacklist_hashes[] = { #include "blacklist_hash_list" };
linux-master
certs/blacklist_hashes.c
/*

   fp_trig.c: floating-point math routines for the Linux-m68k
   floating point emulator.

   Copyright (c) 1998-1999 David Huggins-Daines / Roman Zippel.

   I hereby give permission, free of charge, to copy, modify, and
   redistribute this software, in source or binary form, provided that
   the above copyright notice and the following disclaimer are included
   in all such copies.

   THIS SOFTWARE IS PROVIDED "AS IS", WITH ABSOLUTELY NO WARRANTY, REAL
   OR IMPLIED.

*/

#include "fp_emu.h"

/* Extended-precision constant 1.0 (bias 0x3fff, implicit mantissa). */
static const struct fp_ext fp_one = {
	.exp = 0x3fff,
};

extern struct fp_ext *fp_fadd(struct fp_ext *dest, const struct fp_ext *src);
extern struct fp_ext *fp_fdiv(struct fp_ext *dest, const struct fp_ext *src);

struct fp_ext *
fp_fsqrt(struct fp_ext *dest, struct fp_ext *src)
{
	struct fp_ext tmp, src2;
	int i, exp;

	dprint(PINSTR, "fsqrt\n");

	fp_monadic_check(dest, src);

	if (IS_ZERO(dest))
		return dest;

	if (dest->sign) {
		/* sqrt of a negative number is NaN */
		fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(dest))
		return dest;

	/*
	 *		 sqrt(m) * 2^(p)	, if e = 2*p
	 * sqrt(m*2^e) =
	 *		 sqrt(2*m) * 2^(p)	, if e = 2*p + 1
	 *
	 * So we use the last bit of the exponent to decide whether to
	 * use the m or 2*m.
	 *
	 * Since only the fractional part of the mantissa is stored and
	 * the integer part is assumed to be one, we place a 1 or 2 into
	 * the fixed point representation.
	 */
	exp = dest->exp;
	dest->exp = 0x3FFF;
	if (!(exp & 1))		/* lowest bit of raw exponent is clear, so the
				 * unbiased exponent e is odd: scale to 2*m */
		dest->exp++;
	fp_copy_ext(&src2, dest);

	/*
	 * The taylor row around a for sqrt(x) is:
	 *	sqrt(x) = sqrt(a) + 1/(2*sqrt(a))*(x-a) + R
	 * With a=1 this gives:
	 *	sqrt(x) = 1 + 1/2*(x-1)
	 *		= 1/2*(1+x)
	 */
	fp_fadd(dest, &fp_one);
	dest->exp--;		/* * 1/2 */

	/*
	 * We now apply the newton rule to the function
	 *	f(x) := x^2 - r
	 * which has a null point on x = sqrt(r).
	 *
	 * It gives:
	 *	x' := x - f(x)/f'(x)
	 *	    = x - (x^2 -r)/(2*x)
	 *	    = x - (x - r/x)/2
	 *	    = (2*x - x + r/x)/2
	 *	    = (x + r/x)/2
	 */
	/* Nine Newton iterations starting from the Taylor estimate. */
	for (i = 0; i < 9; i++) {
		fp_copy_ext(&tmp, &src2);

		fp_fdiv(&tmp, dest);
		fp_fadd(dest, &tmp);
		dest->exp--;
	}

	/* Restore half of the (unbiased) original exponent. */
	dest->exp += (exp - 0x3FFF) / 2;

	return dest;
}

/* The following operations are unimplemented stubs: they log via uprint()
 * and return the (checked) destination unchanged. */

struct fp_ext *
fp_fetoxm1(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fetoxm1\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *
fp_fetox(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fetox\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *
fp_ftwotox(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("ftwotox\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *
fp_ftentox(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("ftentox\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *
fp_flogn(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("flogn\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *
fp_flognp1(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("flognp1\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *
fp_flog10(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("flog10\n");

	fp_monadic_check(dest, src);

	return dest;
}

struct fp_ext *
fp_flog2(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("flog2\n");

	fp_monadic_check(dest, src);

	return dest;
}

/* FGETEXP: extract the unbiased exponent as an integer-valued float. */
struct fp_ext *
fp_fgetexp(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fgetexp\n");

	fp_monadic_check(dest, src);

	if (IS_INF(dest)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_ZERO(dest))
		return dest;

	fp_conv_long2ext(dest, (int)dest->exp - 0x3FFF);

	fp_normalize_ext(dest);

	return dest;
}

/* FGETMAN: keep the mantissa, force the exponent to 0 (value in [1,2)). */
struct fp_ext *
fp_fgetman(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fgetman\n");

	fp_monadic_check(dest, src);

	if (IS_ZERO(dest))
		return dest;

	if (IS_INF(dest))
		return dest;

	dest->exp = 0x3FFF;

	return dest;
}
linux-master
arch/m68k/math-emu/fp_log.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fp_arith.c: floating-point math routines for the Linux-m68k
 * floating point emulator.
 *
 * Copyright (c) 1998-1999 David Huggins-Daines.
 *
 * Somewhat based on the AlphaLinux floating point emulator, by David
 * Mosberger-Tang.
 */

#include "fp_emu.h"
#include "multi_arith.h"
#include "fp_arith.h"

/* Canonical quiet NaN: maximum exponent, all-ones mantissa. */
const struct fp_ext fp_QNaN = {
	.exp = 0x7fff,
	.mant = { .m64 = ~0 }
};

/* Infinity: maximum exponent, zero mantissa. */
const struct fp_ext fp_Inf = {
	.exp = 0x7fff,
};

/* let's start with the easy ones */

/* fp_fabs: emulate FABS — clear the sign bit. */
struct fp_ext *
fp_fabs(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fabs\n");

	fp_monadic_check(dest, src);

	dest->sign = 0;

	return dest;
}

/* fp_fneg: emulate FNEG — toggle the sign bit. */
struct fp_ext *
fp_fneg(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fneg\n");

	fp_monadic_check(dest, src);

	dest->sign = !dest->sign;

	return dest;
}

/* Now, the slightly harder ones */

/*
 * fp_fadd: Implements the kernel of the FADD, FSADD, FDADD, FSUB,
 * FDSUB, and FCMP instructions.
 *
 * Aligns the smaller operand by denormalizing it, then adds or
 * subtracts the mantissas depending on the signs.  Result replaces
 * dest; src may be denormalized as a side effect.
 */
struct fp_ext *
fp_fadd(struct fp_ext *dest, struct fp_ext *src)
{
	int diff;

	dprint(PINSTR, "fadd\n");

	fp_dyadic_check(dest, src);

	if (IS_INF(dest)) {
		/* infinity - infinity == NaN */
		if (IS_INF(src) && (src->sign != dest->sign))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		fp_copy_ext(dest, src);
		return dest;
	}

	if (IS_ZERO(dest)) {
		if (IS_ZERO(src)) {
			/* (+0) + (-0): sign depends on the rounding mode */
			if (src->sign != dest->sign) {
				if (FPDATA->rnd == FPCR_ROUND_RM)
					dest->sign = 1;
				else
					dest->sign = 0;
			}
		} else
			fp_copy_ext(dest, src);
		return dest;
	}

	dest->lowmant = src->lowmant = 0;

	/* Denormalize whichever operand has the smaller exponent. */
	if ((diff = dest->exp - src->exp) > 0)
		fp_denormalize(src, diff);
	else if ((diff = -diff) > 0)
		fp_denormalize(dest, diff);

	if (dest->sign == src->sign) {
		if (fp_addmant(dest, src))
			if (!fp_addcarry(dest))
				return dest;
	} else {
		/* Opposite signs: subtract the smaller mantissa and fix
		   the sign if the subtraction crossed zero. */
		if (dest->mant.m64 < src->mant.m64) {
			fp_submant(dest, src, dest);
			dest->sign = !dest->sign;
		} else
			fp_submant(dest, dest, src);
	}

	return dest;
}

/*
 * fp_fsub: Implements the kernel of the FSUB, FSSUB, and FDSUB
 * instructions.
 *
 * Remember that the arguments are in assembler-syntax order!
 * Note: flips src's sign in place before delegating to fp_fadd().
 */
struct fp_ext *
fp_fsub(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fsub ");

	src->sign = !src->sign;
	return fp_fadd(dest, src);
}

/*
 * fp_fcmp: emulate FCMP — performs dest - src into a scratch register
 * so that only the status flags are affected, not dest itself.
 */
struct fp_ext *
fp_fcmp(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fcmp ");

	FPDATA->temp[1] = *dest;
	src->sign = !src->sign;
	return fp_fadd(&FPDATA->temp[1], src);
}

/* fp_ftst: emulate FTST — only condition codes are set, dest untouched. */
struct fp_ext *
fp_ftst(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "ftst\n");

	(void)dest;

	return src;
}

/*
 * fp_fmul: Implements the kernel of the FMUL instruction.
 *
 * Full 64x64 -> 128-bit mantissa multiply, renormalized back into
 * dest with overflow/underflow handling.
 */
struct fp_ext *
fp_fmul(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero.  You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	exp = dest->exp + src->exp - 0x3ffe;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes the
	   shift of the result below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do a 64-bit multiply with expansion */
	fp_multiplymant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if ((long)temp.m32[0] > 0) {
		exp--;
		fp_putmant128(dest, &temp, 1);
	} else
		fp_putmant128(dest, &temp, 0);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/*
 * fp_fdiv: Implements the "kernel" of the FDIV, FSDIV, FDDIV and
 * FSGLDIV instructions.
 *
 * Note that the order of the operands is counter-intuitive: instead
 * of src / dest, the result is actually dest / src.
 */
struct fp_ext *
fp_fdiv(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fdiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with approprate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	exp = dest->exp - src->exp + 0x3fff;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes lots
	   of things below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do the 64-bit divide */
	fp_dividemant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if (!temp.m32[0]) {
		exp--;
		fp_putmant128(dest, &temp, 32);
	} else
		fp_putmant128(dest, &temp, 31);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/*
 * fp_fsglmul: emulate FSGLMUL — single-precision multiply: both
 * mantissas are truncated to 24 significant bits before a 32x32
 * multiply.
 */
struct fp_ext *
fp_fsglmul(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;

	dprint(PINSTR, "fsglmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero.  You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	exp = dest->exp + src->exp - 0x3ffe;

	/* do a 32-bit multiply */
	fp_mul64(dest->mant.m32[0], dest->mant.m32[1],
		 dest->mant.m32[0] & 0xffffff00,
		 src->mant.m32[0] & 0xffffff00);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/*
 * fp_fsgldiv: emulate FSGLDIV — single-precision divide; operand
 * order is dest / src, same as fp_fdiv().
 */
struct fp_ext *
fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;
	unsigned long quot, rem;

	dprint(PINSTR, "fsgldiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with approprate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	exp = dest->exp - src->exp + 0x3fff;

	/* Truncate both mantissas to single precision (24 bits). */
	dest->mant.m32[0] &= 0xffffff00;
	src->mant.m32[0] &= 0xffffff00;

	/* do the 32-bit divide */
	if (dest->mant.m32[0] >= src->mant.m32[0]) {
		fp_sub64(dest->mant, src->mant);
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = 0x80000000 | (quot >> 1);
		dest->mant.m32[1] = (quot & 1) | rem;	/* only for rounding */
	} else {
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = quot;
		dest->mant.m32[1] = rem;		/* only for rounding */
		exp--;
	}

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/*
 * fp_roundint: Internal rounding function for use by several of these
 * emulated instructions.
 *
 * This one rounds off the fractional part using the rounding mode
 * specified.
 */
static void fp_roundint(struct fp_ext *dest, int mode)
{
	union fp_mant64 oldmant;
	unsigned long mask;

	if (!fp_normalize_ext(dest))
		return;

	/* infinities and zeroes */
	if (IS_INF(dest) || IS_ZERO(dest))
		return;

	/* first truncate the lower bits */
	oldmant = dest->mant;
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		/* |x| < 1: everything is fractional */
		dest->mant.m64 = 0;
		break;
	case 0x3fff ... 0x401e:
		/* integer part fits in the high mantissa word */
		dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp);
		dest->mant.m32[1] = 0;
		if (oldmant.m64 == dest->mant.m64)
			return;
		break;
	case 0x401f ... 0x403e:
		/* integer part spills into the low mantissa word */
		dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp);
		if (oldmant.m32[1] == dest->mant.m32[1])
			return;
		break;
	default:
		/* too large to have a fractional part at all */
		return;
	}
	fp_set_sr(FPSR_EXC_INEX2);

	/* We might want to normalize upwards here... however, since
	   we know that this is only called on the output of fp_fdiv,
	   or with the input to fp_fint or fp_fintrz, and the inputs
	   to all these functions are either normal or denormalized
	   (no subnormals allowed!), there's really no need.

	   In the case of fp_fdiv, observe that 0x80000000 / 0xffff =
	   0xffff8000, and the same holds for 128-bit / 64-bit. (i.e. the
	   smallest possible normal dividend and the largest possible normal
	   divisor will still produce a normal quotient, therefore, (normal
	   << 64) / normal is normal in all cases) */

	switch (mode) {
	case FPCR_ROUND_RN:
		/* round-to-nearest-even: decide from the guard bit and
		   the sticky bits whether to round up (break) or keep
		   the truncated value (return) */
		switch (dest->exp) {
		case 0 ... 0x3ffd:
			return;
		case 0x3ffe:
			/* As noted above, the input is always normal,
			   so the guard bit (bit 63) is always set.
			   therefore, the only case in which we will NOT
			   round to 1.0 is when the input is exactly
			   0.5. */
			if (oldmant.m64 == (1ULL << 63))
				return;
			break;
		case 0x3fff ... 0x401d:
			mask = 1 << (0x401d - dest->exp);
			if (!(oldmant.m32[0] & mask))
				return;
			if (oldmant.m32[0] & (mask << 1))
				break;
			if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) &&
					!oldmant.m32[1])
				return;
			break;
		case 0x401e:
			if (oldmant.m32[1] & 0x80000000)
				return;
			if (oldmant.m32[0] & 1)
				break;
			if (!(oldmant.m32[1] << 1))
				return;
			break;
		case 0x401f ... 0x403d:
			mask = 1 << (0x403d - dest->exp);
			if (!(oldmant.m32[1] & mask))
				return;
			if (oldmant.m32[1] & (mask << 1))
				break;
			if (!(oldmant.m32[1] << (dest->exp - 0x401d)))
				return;
			break;
		default:
			return;
		}
		break;
	case FPCR_ROUND_RZ:
		/* round toward zero == truncation, already done */
		return;
	default:
		/* RM/RP: round away from zero only when the sign
		   matches the rounding direction */
		if (dest->sign ^ (mode - FPCR_ROUND_RM))
			break;
		return;
	}

	/* round the magnitude up by one ulp of the integer part,
	   propagating the carry through the mantissa words */
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->exp = 0x3fff;
		dest->mant.m64 = 1ULL << 63;
		break;
	case 0x3fff ... 0x401e:
		mask = 1 << (0x401e - dest->exp);
		if (dest->mant.m32[0] += mask)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	case 0x401f ... 0x403e:
		mask = 1 << (0x403e - dest->exp);
		if (dest->mant.m32[1] += mask)
			break;
		if (dest->mant.m32[0] += 1)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	}
}

/*
 * modrem_kernel: Implementation of the FREM and FMOD instructions
 * (which are exactly the same, except for the rounding used on the
 * intermediate value)
 */
static struct fp_ext *
modrem_kernel(struct fp_ext *dest, struct fp_ext *src, int mode)
{
	struct fp_ext tmp;

	fp_dyadic_check(dest, src);

	/* Infinities and zeros */
	if (IS_INF(dest) || IS_ZERO(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_ZERO(dest) || IS_INF(src))
		return dest;

	/* FIXME: there is almost certainly a smarter way to do this */
	fp_copy_ext(&tmp, dest);
	fp_fdiv(&tmp, src);		/* NOTE: src might be modified */
	fp_roundint(&tmp, mode);
	fp_fmul(&tmp, src);
	fp_fsub(dest, &tmp);

	/* set the quotient byte */
	fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7));
	return dest;
}

/*
 * fp_fmod: Implements the kernel of the FMOD instruction.
 *
 * Again, the argument order is backwards.  The result, as defined in
 * the Motorola manuals, is:
 *
 *	fmod(src,dest) = (dest - (src * floor(dest / src)))
 */
struct fp_ext *
fp_fmod(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fmod\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RZ);
}

/*
 * fp_frem: Implements the kernel of the FREM instruction.
 *
 *	frem(src,dest) = (dest - (src * round(dest / src)))
 */
struct fp_ext *
fp_frem(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "frem\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RN);
}

/* fp_fint: emulate FINT — round to integer using the current FPCR
   rounding mode. */
struct fp_ext *
fp_fint(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fint\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPDATA->rnd);

	return dest;
}

/* fp_fintrz: emulate FINTRZ — round to integer toward zero,
   regardless of the current rounding mode. */
struct fp_ext *
fp_fintrz(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fintrz\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPCR_ROUND_RZ);

	return dest;
}

/*
 * fp_fscale: emulate FSCALE — scale dest by 2^src, i.e. add the
 * integer part of src (rounded toward zero) to dest's exponent.
 */
struct fp_ext *
fp_fscale(struct fp_ext *dest, struct fp_ext *src)
{
	int scale, oldround;

	dprint(PINSTR, "fscale\n");

	fp_dyadic_check(dest, src);

	/* Infinities */
	if (IS_INF(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(dest))
		return dest;

	/* zeroes */
	if (IS_ZERO(src) || IS_ZERO(dest))
		return dest;

	/* Source exponent out of range */
	if (src->exp >= 0x400c) {
		fp_set_ovrflw(dest);
		return dest;
	}

	/* src must be rounded with round to zero. */
	oldround = FPDATA->rnd;
	FPDATA->rnd = FPCR_ROUND_RZ;
	scale = fp_conv_ext2long(src);
	FPDATA->rnd = oldround;

	/* new exponent */
	scale += dest->exp;

	if (scale >= 0x7fff) {
		fp_set_ovrflw(dest);
	} else if (scale <= 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -scale);
	} else
		dest->exp = scale;

	return dest;
}
linux-master
arch/m68k/math-emu/fp_arith.c
/* fp_trig.c: floating-point math routines for the Linux-m68k
   floating point emulator.

   Copyright (c) 1998-1999 David Huggins-Daines / Roman Zippel.

   I hereby give permission, free of charge, to copy, modify, and
   redistribute this software, in source or binary form, provided that
   the above copyright notice and the following disclaimer are included
   in all such copies.

   THIS SOFTWARE IS PROVIDED "AS IS", WITH ABSOLUTELY NO WARRANTY, REAL
   OR IMPLIED.
*/

#include "fp_emu.h"
#include "fp_trig.h"

/*
 * None of the transcendental instructions below are implemented:
 * each stub logs the unimplemented opcode via uprint() and returns
 * the operand unchanged (after the monadic NaN/format handling).
 */

/* fp_fsin: emulate FSIN — not implemented. */
struct fp_ext *
fp_fsin(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsin\n");

	fp_monadic_check(dest, src);

	return dest;
}

/* fp_fcos: emulate FCOS — not implemented. */
struct fp_ext *
fp_fcos(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fcos\n");

	fp_monadic_check(dest, src);

	return dest;
}

/* fp_ftan: emulate FTAN — not implemented. */
struct fp_ext *
fp_ftan(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("ftan\n");

	fp_monadic_check(dest, src);

	return dest;
}

/* fp_fasin: emulate FASIN — not implemented. */
struct fp_ext *
fp_fasin(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fasin\n");

	fp_monadic_check(dest, src);

	return dest;
}

/* fp_facos: emulate FACOS — not implemented. */
struct fp_ext *
fp_facos(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("facos\n");

	fp_monadic_check(dest, src);

	return dest;
}

/* fp_fatan: emulate FATAN — not implemented. */
struct fp_ext *
fp_fatan(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fatan\n");

	fp_monadic_check(dest, src);

	return dest;
}

/* fp_fsinh: emulate FSINH — not implemented. */
struct fp_ext *
fp_fsinh(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsinh\n");

	fp_monadic_check(dest, src);

	return dest;
}

/* fp_fcosh: emulate FCOSH — not implemented. */
struct fp_ext *
fp_fcosh(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fcosh\n");

	fp_monadic_check(dest, src);

	return dest;
}

/* fp_ftanh: emulate FTANH — not implemented. */
struct fp_ext *
fp_ftanh(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("ftanh\n");

	fp_monadic_check(dest, src);

	return dest;
}

/* fp_fatanh: emulate FATANH — not implemented. */
struct fp_ext *
fp_fatanh(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fatanh\n");

	fp_monadic_check(dest, src);

	return dest;
}

/*
 * fp_fsincos0..7: emulate the FSINCOS variants — not implemented.
 * NOTE(review): unlike the stubs above, these do not call
 * fp_monadic_check() — confirm whether that omission is intentional.
 */
struct fp_ext *
fp_fsincos0(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos0\n");

	return dest;
}

struct fp_ext *
fp_fsincos1(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos1\n");

	return dest;
}

struct fp_ext *
fp_fsincos2(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos2\n");

	return dest;
}

struct fp_ext *
fp_fsincos3(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos3\n");

	return dest;
}

struct fp_ext *
fp_fsincos4(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos4\n");

	return dest;
}

struct fp_ext *
fp_fsincos5(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos5\n");

	return dest;
}

struct fp_ext *
fp_fsincos6(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos6\n");

	return dest;
}

struct fp_ext *
fp_fsincos7(struct fp_ext *dest, struct fp_ext *src)
{
	uprint("fsincos7\n");

	return dest;
}
linux-master
arch/m68k/math-emu/fp_trig.c
// SPDX-License-Identifier: GPL-2.0
/*
 *	Real Time Clock interface for Linux on the BVME6000
 *
 *	Based on the PC driver by Paul Gortmaker.
 */

#define RTC_VERSION		"1.00"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/rtc.h>	/* For struct rtc_time and ioctls, etc */
#include <linux/bcd.h>
#include <asm/bvme6000hw.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/setup.h>

/*
 * We sponge a minor off of the misc major. No need slurping
 * up another valuable major dev number for this. If you add
 * an ioctl, make sure you don't conflict with SPARC's RTC
 * ioctls.
 */

/* Days per month, 1-indexed; February adjusted for leap years below. */
static unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};

/* 1 = device free, <=0 = in use; see rtc_open()/rtc_release(). */
static atomic_t rtc_status = ATOMIC_INIT(1);

/*
 * rtc_ioctl: read (RTC_RD_TIME) or set (RTC_SET_TIME) the DP8570A
 * clock.  Register access requires saving/restoring the MSR page
 * select bits with interrupts disabled.
 */
static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
	unsigned char msr;
	unsigned long flags;
	struct rtc_time wtime;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case RTC_RD_TIME:	/* Read the time/date from RTC	*/
	{
		local_irq_save(flags);
		/* Ensure clock and real-time-mode-register are accessible */
		msr = rtc->msr & 0xc0;
		rtc->msr = 0x40;
		memset(&wtime, 0, sizeof(struct rtc_time));
		/* Re-read until the seconds register is stable, so a
		   mid-read rollover cannot yield a torn timestamp. */
		do {
			wtime.tm_sec =  bcd2bin(rtc->bcd_sec);
			wtime.tm_min =  bcd2bin(rtc->bcd_min);
			wtime.tm_hour = bcd2bin(rtc->bcd_hr);
			wtime.tm_mday = bcd2bin(rtc->bcd_dom);
			wtime.tm_mon =  bcd2bin(rtc->bcd_mth)-1;
			wtime.tm_year = bcd2bin(rtc->bcd_year);
			if (wtime.tm_year < 70)
				wtime.tm_year += 100;
			wtime.tm_wday = bcd2bin(rtc->bcd_dow)-1;
		} while (wtime.tm_sec != bcd2bin(rtc->bcd_sec));
		rtc->msr = msr;
		local_irq_restore(flags);
		return copy_to_user(argp, &wtime, sizeof wtime) ?
								-EFAULT : 0;
	}
	case RTC_SET_TIME:	/* Set the RTC */
	{
		struct rtc_time rtc_tm;
		unsigned char mon, day, hrs, min, sec, leap_yr;
		unsigned int yrs;

		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;

		if (copy_from_user(&rtc_tm, argp, sizeof(struct rtc_time)))
			return -EFAULT;

		yrs = rtc_tm.tm_year;
		if (yrs < 1900)
			yrs += 1900;
		mon = rtc_tm.tm_mon + 1;   /* tm_mon starts at zero */
		day = rtc_tm.tm_mday;
		hrs = rtc_tm.tm_hour;
		min = rtc_tm.tm_min;
		sec = rtc_tm.tm_sec;

		leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));

		if ((mon > 12) || (mon < 1) || (day == 0))
			return -EINVAL;

		if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
			return -EINVAL;

		if ((hrs >= 24) || (min >= 60) || (sec >= 60))
			return -EINVAL;

		if (yrs >= 2070)
			return -EINVAL;

		local_irq_save(flags);
		/* Ensure clock and real-time-mode-register are accessible */
		msr = rtc->msr & 0xc0;
		rtc->msr = 0x40;

		/* Leap-year counter holds yrs%4; writing it without bit
		   3 stops the clock while the BCD registers are set. */
		rtc->t0cr_rtmr = yrs%4;
		rtc->bcd_tenms = 0;
		rtc->bcd_sec   = bin2bcd(sec);
		rtc->bcd_min   = bin2bcd(min);
		rtc->bcd_hr    = bin2bcd(hrs);
		rtc->bcd_dom   = bin2bcd(day);
		rtc->bcd_mth   = bin2bcd(mon);
		rtc->bcd_year  = bin2bcd(yrs%100);
		if (rtc_tm.tm_wday >= 0)
			rtc->bcd_dow = bin2bcd(rtc_tm.tm_wday+1);
		rtc->t0cr_rtmr = yrs%4 | 0x08;	/* restart the clock */

		rtc->msr = msr;
		local_irq_restore(flags);
		return 0;
	}
	default:
		return -EINVAL;
	}
}

/*
 * We enforce only one user at a time here with the open/close.
 */
static int rtc_open(struct inode *inode, struct file *file)
{
	/* Atomically claim the device; restore the count on contention. */
	if (!atomic_dec_and_test(&rtc_status)) {
		atomic_inc(&rtc_status);
		return -EBUSY;
	}
	return 0;
}

static int rtc_release(struct inode *inode, struct file *file)
{
	atomic_inc(&rtc_status);
	return 0;
}

/*
 *	The various file operations we support.
 */

static const struct file_operations rtc_fops = {
	.unlocked_ioctl	= rtc_ioctl,
	.open		= rtc_open,
	.release	= rtc_release,
	.llseek		= noop_llseek,
};

static struct miscdevice rtc_dev = {
	.minor	= RTC_MINOR,
	.name	= "rtc",
	.fops	= &rtc_fops
};

/* Register the misc device on BVME6000 hardware only. */
static int __init rtc_DP8570A_init(void)
{
	if (!MACH_IS_BVME6000)
		return -ENODEV;

	pr_info("DP8570A Real Time Clock Driver v%s\n", RTC_VERSION);
	return misc_register(&rtc_dev);
}
module_init(rtc_DP8570A_init);
linux-master
arch/m68k/bvme6000/rtc.c
/*
 *  arch/m68k/bvme6000/config.c
 *
 *  Copyright (C) 1997 Richard Hirst [[email protected]]
 *
 * Based on:
 *
 *  linux/amiga/config.c
 *
 *  Copyright (C) 1993 Hamish Macdonald
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/rtc.h>
#include <linux/interrupt.h>
#include <linux/bcd.h>

#include <asm/bootinfo.h>
#include <asm/bootinfo-vme.h>
#include <asm/byteorder.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/bvme6000hw.h>
#include <asm/config.h>

static void bvme6000_get_model(char *model);
extern void bvme6000_sched_init(void);
extern int bvme6000_hwclk (int, struct rtc_time *);
extern void bvme6000_reset (void);
void bvme6000_set_vectors (void);

/* Accept (return 0) only the VME-type bootinfo record. */
int __init bvme6000_parse_bootinfo(const struct bi_record *bi)
{
	if (be16_to_cpu(bi->tag) == BI_VME_TYPE)
		return 0;
	else
		return 1;
}

/* Reboot the board by arming the watchdog and spinning until it fires. */
void bvme6000_reset(void)
{
	volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;

	pr_info("\r\n\nCalled bvme6000_reset\r\n"
		"\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r\r");
	/* The string of returns is to delay the reset until the whole
	 * message is output. */
	/* Enable the watchdog, via PIT port C bit 4 */
	pit->pcddr	|= 0x10;	/* WDOG enable */
	while(1)
		;
}

static void bvme6000_get_model(char *model)
{
	sprintf(model, "BVME%d000", m68k_cputype == CPU_68060 ? 6 : 4);
}

/*
 * This function is called during kernel startup to initialize
 * the bvme6000 IRQ handling routines.
 */
static void __init bvme6000_init_IRQ(void)
{
	m68k_setup_user_interrupt(VEC_USER, 192);
}

/*
 * config_bvme6000: machine-description setup — install the mach_*
 * callbacks and program the PIT for board operation.
 */
void __init config_bvme6000(void)
{
	volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;

	/* Board type is only set by newer versions of vmelilo/tftplilo */
	if (!vme_brdtype) {
		if (m68k_cputype == CPU_68060)
			vme_brdtype = VME_TYPE_BVME6000;
		else
			vme_brdtype = VME_TYPE_BVME4000;
	}
#if 0
	/* Call bvme6000_set_vectors() so ABORT will work, along with BVMBug
	 * debugger.  Note trap_init() will splat the abort vector, but
	 * bvme6000_init_IRQ() will put it back again.  Hopefully. */

	bvme6000_set_vectors();
#endif

	mach_sched_init      = bvme6000_sched_init;
	mach_init_IRQ        = bvme6000_init_IRQ;
	mach_hwclk           = bvme6000_hwclk;
	mach_reset	     = bvme6000_reset;
	mach_get_model       = bvme6000_get_model;

	pr_info("Board is %sconfigured as a System Controller\n",
		*config_reg_ptr & BVME_CONFIG_SW1 ? "" : "not ");

	/* Now do the PIT configuration */

	pit->pgcr	= 0x00;	/* Unidirectional 8 bit, no handshake for now */
	pit->psrr	= 0x18;	/* PIACK and PIRQ functions enabled */
	pit->pacr	= 0x00;	/* Sub Mode 00, H2 i/p, no DMA */
	pit->padr	= 0x00;	/* Just to be tidy! */
	pit->paddr	= 0x00;	/* All inputs for now (safest) */
	pit->pbcr	= 0x80;	/* Sub Mode 1x, H4 i/p, no DMA */
	pit->pbdr	= 0xbc | (*config_reg_ptr & BVME_CONFIG_SW1 ?
				0 : 0x40);
				/* PRI, SYSCON?, Level3, SCC clks from xtal */
	pit->pbddr	= 0xf3;	/* Mostly outputs */
	pit->pcdr	= 0x01;	/* PA transceiver disabled */
	pit->pcddr	= 0x03;	/* WDOG disable */

	/* Disable snooping for Ethernet and VME accesses */

	bvme_acr_addrctl = 0;
}

/*
 * bvme6000_abort_int: ABORT-switch handler — restore the debugger's
 * exception vectors from ROM so BVMBug can take over.
 */
irqreturn_t bvme6000_abort_int (int irq, void *dev_id)
{
	unsigned long *new = (unsigned long *)vectors;
	unsigned long *old = (unsigned long *)0xf8000000;

	/* Wait for button release */
	while (*(volatile unsigned char *)BVME_LOCAL_IRQ_STAT &
						BVME_ABORT_STATUS)
		;

	*(new+4) = *(old+4);		/* Illegal instruction */
	*(new+9) = *(old+9);		/* Trace */
	*(new+47) = *(old+47);		/* Trap #15 */
	*(new+0x1f) = *(old+0x1f);	/* ABORT switch */
	return IRQ_HANDLED;
}

static u64 bvme6000_read_clk(struct clocksource *cs);

static struct clocksource bvme6000_clk = {
	.name   = "rtc",
	.rating = 250,
	.read   = bvme6000_read_clk,
	.mask   = CLOCKSOURCE_MASK(32),
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

/* clk_total: cycles accumulated at past ticks; clk_offset: correction
   applied when a tick is pending but not yet handled. */
static u32 clk_total, clk_offset;

#define RTC_TIMER_CLOCK_FREQ 8000000
#define RTC_TIMER_CYCLES     (RTC_TIMER_CLOCK_FREQ / HZ)
#define RTC_TIMER_COUNT      ((RTC_TIMER_CYCLES / 2) - 1)

/* Timer-1 tick handler: acknowledge the RTC interrupt and advance
   the tick bookkeeping. */
static irqreturn_t bvme6000_timer_int (int irq, void *dev_id)
{
	unsigned long flags;
	volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
	unsigned char msr;

	local_irq_save(flags);
	msr = rtc->msr & 0xc0;
	rtc->msr = msr | 0x20;		/* Ack the interrupt */
	clk_total += RTC_TIMER_CYCLES;
	clk_offset = 0;
	legacy_timer_tick(1);
	local_irq_restore(flags);

	return IRQ_HANDLED;
}

/*
 * Set up the RTC timer 1 to mode 2, so T1 output toggles every 5ms
 * (40000 x 125ns).  It will interrupt every 10ms, when T1 goes low.
 * So, when reading the elapsed time, you should read timer1,
 * subtract it from 39999, and then add 40000 if T1 is high.
 * That gives you the number of 125ns ticks in to the 10ms period,
 * so divide by 8 to get the microsecond result.
 */
void bvme6000_sched_init (void)
{
	volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
	unsigned char msr = rtc->msr & 0xc0;

	rtc->msr = 0;	/* Ensure timer registers accessible */

	if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, IRQF_TIMER, "timer",
			NULL))
		panic ("Couldn't register timer int");

	rtc->t1cr_omr = 0x04;	/* Mode 2, ext clk */
	rtc->t1msb = RTC_TIMER_COUNT >> 8;
	rtc->t1lsb = RTC_TIMER_COUNT & 0xff;
	rtc->irr_icr1 &= 0xef;	/* Route timer 1 to INTR pin */
	rtc->msr = 0x40;	/* Access int.cntrl, etc */
	rtc->pfr_icr0 = 0x80;	/* Just timer 1 ints enabled */
	rtc->irr_icr1 = 0;
	rtc->t1cr_omr = 0x0a;	/* INTR+T1 active lo, push-pull */
	rtc->t0cr_rtmr &= 0xdf;	/* Stop timers in standby */
	rtc->msr = 0;		/* Access timer 1 control */
	rtc->t1cr_omr = 0x05;	/* Mode 2, ext clk, GO */

	rtc->msr = msr;

	clocksource_register_hz(&bvme6000_clk, RTC_TIMER_CLOCK_FREQ);

	if (request_irq(BVME_IRQ_ABORT, bvme6000_abort_int, 0,
			"abort", bvme6000_abort_int))
		panic ("Couldn't register abort int");
}


/*
 * NOTE:  Don't accept any readings within 5us of rollover, as
 * the T1INT bit may be a little slow getting set.  There is also
 * a fault in the chip, meaning that reads may produce invalid
 * results...
 */
static u64 bvme6000_read_clk(struct clocksource *cs)
{
	unsigned long flags;
	volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
	volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;
	unsigned char msr, msb;
	unsigned char t1int, t1op;
	u32 v = 800000, ov;

	local_irq_save(flags);

	msr = rtc->msr & 0xc0;
	rtc->msr = 0;	/* Ensure timer registers accessible */

	/* Retry until two successive latched reads agree closely and
	   the interrupt/output state did not change mid-read (see the
	   chip-fault note above). */
	do {
		ov = v;
		t1int = rtc->msr & 0x20;
		t1op  = pit->pcdr & 0x04;
		rtc->t1cr_omr |= 0x40;		/* Latch timer1 */
		msb = rtc->t1msb;		/* Read timer1 */
		v = (msb << 8) | rtc->t1lsb;	/* Read timer1 */
	} while (t1int != (rtc->msr & 0x20) ||
			t1op != (pit->pcdr & 0x04) ||
				abs(ov-v) > 80 ||
					v > RTC_TIMER_COUNT - (RTC_TIMER_COUNT / 100));

	v = RTC_TIMER_COUNT - v;
	if (!t1op)				/* If in second half cycle.. */
		v += RTC_TIMER_CYCLES / 2;
	if (msb > 0 && t1int)
		clk_offset = RTC_TIMER_CYCLES;
	rtc->msr = msr;

	v += clk_offset + clk_total;

	local_irq_restore(flags);

	return v;
}

/*
 * Looks like op is non-zero for setting the clock, and zero for
 * reading the clock.
 *
 *  struct hwclk_time {
 *         unsigned        sec;       0..59
 *         unsigned        min;       0..59
 *         unsigned        hour;      0..23
 *         unsigned        day;       1..31
 *         unsigned        mon;       0..11
 *         unsigned        year;      00...
 *         int             wday;      0..6, 0 is Sunday, -1 means unknown/don't set
 * };
 */
int bvme6000_hwclk(int op, struct rtc_time *t)
{
	volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
	unsigned char msr = rtc->msr & 0xc0;

	rtc->msr = 0x40;	/* Ensure clock and real-time-mode-register
				 * are accessible */
	if (op) {	/* Write.... */
		rtc->t0cr_rtmr = t->tm_year%4;
		rtc->bcd_tenms = 0;
		rtc->bcd_sec   = bin2bcd(t->tm_sec);
		rtc->bcd_min   = bin2bcd(t->tm_min);
		rtc->bcd_hr    = bin2bcd(t->tm_hour);
		rtc->bcd_dom   = bin2bcd(t->tm_mday);
		rtc->bcd_mth   = bin2bcd(t->tm_mon + 1);
		rtc->bcd_year  = bin2bcd(t->tm_year%100);
		if (t->tm_wday >= 0)
			rtc->bcd_dow = bin2bcd(t->tm_wday+1);
		rtc->t0cr_rtmr = t->tm_year%4 | 0x08;
	} else {	/* Read....  */
		/* Re-read until the seconds register is stable, to
		   avoid a torn reading across a rollover. */
		do {
			t->tm_sec  = bcd2bin(rtc->bcd_sec);
			t->tm_min  = bcd2bin(rtc->bcd_min);
			t->tm_hour = bcd2bin(rtc->bcd_hr);
			t->tm_mday = bcd2bin(rtc->bcd_dom);
			t->tm_mon  = bcd2bin(rtc->bcd_mth)-1;
			t->tm_year = bcd2bin(rtc->bcd_year);
			if (t->tm_year < 70)
				t->tm_year += 100;
			t->tm_wday = bcd2bin(rtc->bcd_dow)-1;
		} while (t->tm_sec != bcd2bin(rtc->bcd_sec));
	}

	rtc->msr = msr;

	return 0;
}
linux-master
arch/m68k/bvme6000/config.c
/*
 * natfeat.c - ARAnyM hardware support via Native Features (natfeats)
 *
 * Copyright (c) 2005 Petr Stehlik of ARAnyM dev team
 *
 * Reworked for Linux by Roman Zippel <[email protected]>
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/console.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/natfeat.h>

extern long nf_get_id_phys(unsigned long feature_name);

/*
 * The 0x7300/0x7301 opcodes are the ARAnyM NatFeats traps; on real
 * hardware they fault, and the __ex_table entries divert both entry
 * points to the stub at "1:", which returns 0 (feature absent).
 */
asm("\n"
"	.global nf_get_id_phys,nf_call\n"
"nf_get_id_phys:\n"
"	.short	0x7300\n"
"	rts\n"
"nf_call:\n"
"	.short	0x7301\n"
"	rts\n"
"1:	moveq.l	#0,%d0\n"
"	rts\n"
"	.section __ex_table,\"a\"\n"
"	.long	nf_get_id_phys,1b\n"
"	.long	nf_call,1b\n"
"	.previous");
EXPORT_SYMBOL_GPL(nf_call);

/*
 * nf_get_id - look up a Native Feature by name
 * @feature_name: NUL-terminated feature name
 *
 * Returns the feature id, or 0 if the feature does not exist (or the
 * name is too long to be valid).
 */
long nf_get_id(const char *feature_name)
{
	/* feature_name may be in vmalloc()ed memory, so make a copy */
	char name_copy[32];

	/*
	 * strscpy() returns -E2BIG when the source does not fit, which
	 * replaces the deprecated strlcpy() + length-check idiom; an
	 * over-long name cannot name a valid feature, so report "absent".
	 */
	if (strscpy(name_copy, feature_name, sizeof(name_copy)) < 0)
		return 0;

	return nf_get_id_phys(virt_to_phys(name_copy));
}
EXPORT_SYMBOL_GPL(nf_get_id);

/* printf-style output to the host's stderr via the NF_STDERR feature. */
void nfprint(const char *fmt, ...)
{
	static char buf[256];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(buf, 256, fmt, ap);
	nf_call(nf_get_id("NF_STDERR"), virt_to_phys(buf));
	va_end(ap);
}

/* Power off the emulated machine through NF_SHUTDOWN, if available. */
static void nf_poweroff(void)
{
	long id = nf_get_id("NF_SHUTDOWN");

	if (id)
		nf_call(id);
}

/* Probe for NatFeats support and announce the host name/version. */
void __init nf_init(void)
{
	unsigned long id, version;
	char buf[256];

	id = nf_get_id("NF_VERSION");
	if (!id)
		return;
	version = nf_call(id);

	id = nf_get_id("NF_NAME");
	if (!id)
		return;
	nf_call(id, virt_to_phys(buf), 256);
	buf[255] = 0;

	pr_info("NatFeats found (%s, %lu.%lu)\n", buf,
		version >> 16, version & 0xffff);

	register_platform_power_off(nf_poweroff);
}
linux-master
arch/m68k/emu/natfeat.c
/*
 * ARAnyM console driver
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/io.h>

#include <asm/natfeat.h>

/* NatFeats id of the host's NF_STDERR feature (0 if unavailable). */
static int stderr_id;
static struct tty_port nfcon_tty_port;
static struct tty_driver *nfcon_tty_driver;

/*
 * nfputs: send a buffer to the host's stderr in NUL-terminated chunks
 * of at most 64 bytes (the string passed to nf_call must live in a
 * physically addressed, bounded local buffer).
 */
static void nfputs(const char *str, unsigned int count)
{
	char buf[68];
	unsigned long phys = virt_to_phys(buf);

	buf[64] = 0;
	while (count > 64) {
		memcpy(buf, str, 64);
		nf_call(stderr_id, phys);
		str += 64;
		count -= 64;
	}
	memcpy(buf, str, count);
	buf[count] = 0;
	nf_call(stderr_id, phys);
}

static void nfcon_write(struct console *con, const char *str,
			unsigned int count)
{
	nfputs(str, count);
}

/* Report the tty device backing this console (once registered). */
static struct tty_driver *nfcon_device(struct console *con, int *index)
{
	*index = 0;
	return console_is_registered(con) ? nfcon_tty_driver : NULL;
}

static struct console nf_console = {
	.name	= "nfcon",
	.write	= nfcon_write,
	.device	= nfcon_device,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
};

static int nfcon_tty_open(struct tty_struct *tty, struct file *filp)
{
	return 0;
}

static void nfcon_tty_close(struct tty_struct *tty, struct file *filp)
{
}

static ssize_t nfcon_tty_write(struct tty_struct *tty, const u8 *buf,
			       size_t count)
{
	nfputs(buf, count);
	return count;
}

static int nfcon_tty_put_char(struct tty_struct *tty, u8 ch)
{
	u8 temp[2] = { ch, 0 };

	nf_call(stderr_id, virt_to_phys(temp));
	return 1;
}

static unsigned int nfcon_tty_write_room(struct tty_struct *tty)
{
	return 64;
}

static const struct tty_operations nfcon_tty_ops = {
	.open		= nfcon_tty_open,
	.close		= nfcon_tty_close,
	.write		= nfcon_tty_write,
	.put_char	= nfcon_tty_put_char,
	.write_room	= nfcon_tty_write_room,
};

#ifndef MODULE

/* Early "debug=nfcon" parameter: enable the console before init. */
static int __init nf_debug_setup(char *arg)
{
	if (strcmp(arg, "nfcon"))
		return 0;

	stderr_id = nf_get_id("NF_STDERR");
	if (stderr_id) {
		/*
		 * The console will be enabled when debug=nfcon is specified
		 * as a kernel parameter. Since this is a non-standard way
		 * of enabling consoles, it must be explicitly enabled.
		 */
		nf_console.flags |= CON_ENABLED;
		register_console(&nf_console);
	}

	return 0;
}

early_param("debug", nf_debug_setup);

#endif /* !MODULE */

/* Register the tty driver and console once NF_STDERR is known to exist. */
static int __init nfcon_init(void)
{
	struct tty_driver *driver;
	int res;

	stderr_id = nf_get_id("NF_STDERR");
	if (!stderr_id)
		return -ENODEV;

	driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
	if (IS_ERR(driver))
		return PTR_ERR(driver);

	tty_port_init(&nfcon_tty_port);

	driver->driver_name = "nfcon";
	driver->name = "nfcon";
	driver->type = TTY_DRIVER_TYPE_SYSTEM;
	driver->subtype = SYSTEM_TYPE_TTY;
	driver->init_termios = tty_std_termios;

	tty_set_operations(driver, &nfcon_tty_ops);
	tty_port_link_device(&nfcon_tty_port, driver, 0);
	res = tty_register_driver(driver);
	if (res) {
		pr_err("failed to register nfcon tty driver\n");
		tty_driver_kref_put(driver);
		tty_port_destroy(&nfcon_tty_port);
		return res;
	}

	nfcon_tty_driver = driver;

	/* nf_debug_setup() may have registered the console already. */
	if (!console_is_registered(&nf_console))
		register_console(&nf_console);

	return 0;
}

static void __exit nfcon_exit(void)
{
	unregister_console(&nf_console);
	tty_unregister_driver(nfcon_tty_driver);
	tty_driver_kref_put(nfcon_tty_driver);
	tty_port_destroy(&nfcon_tty_port);
}

module_init(nfcon_init);
module_exit(nfcon_exit);

MODULE_LICENSE("GPL");
linux-master
arch/m68k/emu/nfcon.c
/* * atari_nfeth.c - ARAnyM ethernet card driver for GNU/Linux * * Copyright (c) 2005 Milan Jurik, Petr Stehlik of ARAnyM dev team * * Based on ARAnyM driver for FreeMiNT written by Standa Opichal * * This software may be used and distributed according to the terms of * the GNU General Public License (GPL), incorporated herein by reference. */ #define DRV_VERSION "0.3" #define DRV_RELDATE "10/12/2005" #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> #include <linux/module.h> #include <asm/natfeat.h> #include <asm/virtconvert.h> enum { GET_VERSION = 0,/* no parameters, return NFAPI_VERSION in d0 */ XIF_INTLEVEL, /* no parameters, return Interrupt Level in d0 */ XIF_IRQ, /* acknowledge interrupt from host */ XIF_START, /* (ethX), called on 'ifup', start receiver thread */ XIF_STOP, /* (ethX), called on 'ifdown', stop the thread */ XIF_READLENGTH, /* (ethX), return size of network data block to read */ XIF_READBLOCK, /* (ethX, buffer, size), read block of network data */ XIF_WRITEBLOCK, /* (ethX, buffer, size), write block of network data */ XIF_GET_MAC, /* (ethX, buffer, size), return MAC HW addr in buffer */ XIF_GET_IPHOST, /* (ethX, buffer, size), return IP address of host */ XIF_GET_IPATARI,/* (ethX, buffer, size), return IP address of atari */ XIF_GET_NETMASK /* (ethX, buffer, size), return IP netmask */ }; #define MAX_UNIT 8 /* These identify the driver base version and may not be removed. 
*/ static const char version[] = KERN_INFO KBUILD_MODNAME ".c:v" DRV_VERSION " " DRV_RELDATE " S.Opichal, M.Jurik, P.Stehlik\n" KERN_INFO " http://aranym.org/\n"; MODULE_AUTHOR("Milan Jurik"); MODULE_DESCRIPTION("Atari NFeth driver"); MODULE_LICENSE("GPL"); static long nfEtherID; static int nfEtherIRQ; struct nfeth_private { int ethX; }; static struct net_device *nfeth_dev[MAX_UNIT]; static int nfeth_open(struct net_device *dev) { struct nfeth_private *priv = netdev_priv(dev); int res; res = nf_call(nfEtherID + XIF_START, priv->ethX); netdev_dbg(dev, "%s: %d\n", __func__, res); /* Ready for data */ netif_start_queue(dev); return 0; } static int nfeth_stop(struct net_device *dev) { struct nfeth_private *priv = netdev_priv(dev); /* No more data */ netif_stop_queue(dev); nf_call(nfEtherID + XIF_STOP, priv->ethX); return 0; } /* * Read a packet out of the adapter and pass it to the upper layers */ static inline void recv_packet(struct net_device *dev) { struct nfeth_private *priv = netdev_priv(dev); unsigned short pktlen; struct sk_buff *skb; /* read packet length (excluding 32 bit crc) */ pktlen = nf_call(nfEtherID + XIF_READLENGTH, priv->ethX); netdev_dbg(dev, "%s: %u\n", __func__, pktlen); if (!pktlen) { netdev_dbg(dev, "%s: pktlen == 0\n", __func__); dev->stats.rx_errors++; return; } skb = dev_alloc_skb(pktlen + 2); if (!skb) { netdev_dbg(dev, "%s: out of mem (buf_alloc failed)\n", __func__); dev->stats.rx_dropped++; return; } skb->dev = dev; skb_reserve(skb, 2); /* 16 Byte align */ skb_put(skb, pktlen); /* make room */ nf_call(nfEtherID + XIF_READBLOCK, priv->ethX, virt_to_phys(skb->data), pktlen); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pktlen; /* and enqueue packet */ return; } static irqreturn_t nfeth_interrupt(int irq, void *dev_id) { int i, m, mask; mask = nf_call(nfEtherID + XIF_IRQ, 0); for (i = 0, m = 1; i < MAX_UNIT; m <<= 1, i++) { if (mask & m && nfeth_dev[i]) { recv_packet(nfeth_dev[i]); 
nf_call(nfEtherID + XIF_IRQ, m); } } return IRQ_HANDLED; } static int nfeth_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned int len; char *data, shortpkt[ETH_ZLEN]; struct nfeth_private *priv = netdev_priv(dev); data = skb->data; len = skb->len; if (len < ETH_ZLEN) { memset(shortpkt, 0, ETH_ZLEN); memcpy(shortpkt, data, len); data = shortpkt; len = ETH_ZLEN; } netdev_dbg(dev, "%s: send %u bytes\n", __func__, len); nf_call(nfEtherID + XIF_WRITEBLOCK, priv->ethX, virt_to_phys(data), len); dev->stats.tx_packets++; dev->stats.tx_bytes += len; dev_kfree_skb(skb); return 0; } static void nfeth_tx_timeout(struct net_device *dev, unsigned int txqueue) { dev->stats.tx_errors++; netif_wake_queue(dev); } static const struct net_device_ops nfeth_netdev_ops = { .ndo_open = nfeth_open, .ndo_stop = nfeth_stop, .ndo_start_xmit = nfeth_xmit, .ndo_tx_timeout = nfeth_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; static struct net_device * __init nfeth_probe(int unit) { struct net_device *dev; struct nfeth_private *priv; char mac[ETH_ALEN], host_ip[32], local_ip[32]; int err; if (!nf_call(nfEtherID + XIF_GET_MAC, unit, virt_to_phys(mac), ETH_ALEN)) return NULL; dev = alloc_etherdev(sizeof(struct nfeth_private)); if (!dev) return NULL; dev->irq = nfEtherIRQ; dev->netdev_ops = &nfeth_netdev_ops; eth_hw_addr_set(dev, mac); priv = netdev_priv(dev); priv->ethX = unit; err = register_netdev(dev); if (err) { free_netdev(dev); return NULL; } nf_call(nfEtherID + XIF_GET_IPHOST, unit, virt_to_phys(host_ip), sizeof(host_ip)); nf_call(nfEtherID + XIF_GET_IPATARI, unit, virt_to_phys(local_ip), sizeof(local_ip)); netdev_info(dev, KBUILD_MODNAME " addr:%s (%s) HWaddr:%pM\n", host_ip, local_ip, mac); return dev; } static int __init nfeth_init(void) { long ver; int error, i; nfEtherID = nf_get_id("ETHERNET"); if (!nfEtherID) return -ENODEV; ver = nf_call(nfEtherID + GET_VERSION); pr_info("API %lu\n", ver); nfEtherIRQ = nf_call(nfEtherID + 
XIF_INTLEVEL); error = request_irq(nfEtherIRQ, nfeth_interrupt, IRQF_SHARED, "eth emu", nfeth_interrupt); if (error) { pr_err("request for irq %d failed %d", nfEtherIRQ, error); return error; } for (i = 0; i < MAX_UNIT; i++) nfeth_dev[i] = nfeth_probe(i); return 0; } static void __exit nfeth_cleanup(void) { int i; for (i = 0; i < MAX_UNIT; i++) { if (nfeth_dev[i]) { unregister_netdev(nfeth_dev[i]); free_netdev(nfeth_dev[i]); } } free_irq(nfEtherIRQ, nfeth_interrupt); } module_init(nfeth_init); module_exit(nfeth_cleanup);
linux-master
arch/m68k/emu/nfeth.c
/* * ARAnyM block device driver * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/blkdev.h> #include <linux/hdreg.h> #include <linux/slab.h> #include <asm/natfeat.h> static long nfhd_id; enum { /* emulation entry points */ NFHD_READ_WRITE = 10, NFHD_GET_CAPACITY = 14, /* skip ACSI devices */ NFHD_DEV_OFFSET = 8, }; static inline s32 nfhd_read_write(u32 major, u32 minor, u32 rwflag, u32 recno, u32 count, u32 buf) { return nf_call(nfhd_id + NFHD_READ_WRITE, major, minor, rwflag, recno, count, buf); } static inline s32 nfhd_get_capacity(u32 major, u32 minor, u32 *blocks, u32 *blocksize) { return nf_call(nfhd_id + NFHD_GET_CAPACITY, major, minor, virt_to_phys(blocks), virt_to_phys(blocksize)); } static LIST_HEAD(nfhd_list); static int major_num; module_param(major_num, int, 0); struct nfhd_device { struct list_head list; int id; u32 blocks, bsize; int bshift; struct gendisk *disk; }; static void nfhd_submit_bio(struct bio *bio) { struct nfhd_device *dev = bio->bi_bdev->bd_disk->private_data; struct bio_vec bvec; struct bvec_iter iter; int dir, len, shift; sector_t sec = bio->bi_iter.bi_sector; dir = bio_data_dir(bio); shift = dev->bshift; bio_for_each_segment(bvec, bio, iter) { len = bvec.bv_len; len >>= 9; nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift, page_to_phys(bvec.bv_page) + bvec.bv_offset); sec += len; } bio_endio(bio); } static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct nfhd_device *dev = bdev->bd_disk->private_data; geo->cylinders = dev->blocks >> (6 - dev->bshift); geo->heads = 4; geo->sectors = 16; return 0; } static const struct block_device_operations nfhd_ops = { .owner = THIS_MODULE, .submit_bio = nfhd_submit_bio, 
.getgeo = nfhd_getgeo, }; static int __init nfhd_init_one(int id, u32 blocks, u32 bsize) { struct nfhd_device *dev; int dev_id = id - NFHD_DEV_OFFSET; int err = -ENOMEM; pr_info("nfhd%u: found device with %u blocks (%u bytes)\n", dev_id, blocks, bsize); if (bsize < 512 || (bsize & (bsize - 1))) { pr_warn("nfhd%u: invalid block size\n", dev_id); return -EINVAL; } dev = kmalloc(sizeof(struct nfhd_device), GFP_KERNEL); if (!dev) goto out; dev->id = id; dev->blocks = blocks; dev->bsize = bsize; dev->bshift = ffs(bsize) - 10; dev->disk = blk_alloc_disk(NUMA_NO_NODE); if (!dev->disk) goto free_dev; dev->disk->major = major_num; dev->disk->first_minor = dev_id * 16; dev->disk->minors = 16; dev->disk->fops = &nfhd_ops; dev->disk->private_data = dev; sprintf(dev->disk->disk_name, "nfhd%u", dev_id); set_capacity(dev->disk, (sector_t)blocks * (bsize / 512)); blk_queue_logical_block_size(dev->disk->queue, bsize); err = add_disk(dev->disk); if (err) goto out_cleanup_disk; list_add_tail(&dev->list, &nfhd_list); return 0; out_cleanup_disk: put_disk(dev->disk); free_dev: kfree(dev); out: return err; } static int __init nfhd_init(void) { u32 blocks, bsize; int ret; int i; nfhd_id = nf_get_id("XHDI"); if (!nfhd_id) return -ENODEV; ret = register_blkdev(major_num, "nfhd"); if (ret < 0) { pr_warn("nfhd: unable to get major number\n"); return ret; } if (!major_num) major_num = ret; for (i = NFHD_DEV_OFFSET; i < 24; i++) { if (nfhd_get_capacity(i, 0, &blocks, &bsize)) continue; nfhd_init_one(i, blocks, bsize); } return 0; } static void __exit nfhd_exit(void) { struct nfhd_device *dev, *next; list_for_each_entry_safe(dev, next, &nfhd_list, list) { list_del(&dev->list); del_gendisk(dev->disk); put_disk(dev->disk); kfree(dev); } unregister_blkdev(major_num, "nfhd"); } module_init(nfhd_init); module_exit(nfhd_exit); MODULE_LICENSE("GPL");
linux-master
arch/m68k/emu/nfblock.c
/* * linux/arch/m68k/tools/amiga/dmesg.c -- Retrieve the kernel messages stored * in Chip RAM with the kernel command * line option `debug=mem'. * * © Copyright 1996 by Geert Uytterhoeven <[email protected]> * * * Usage: * * dmesg * dmesg <CHIPMEM_END> * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of the Linux * distribution for more details. */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #define CHIPMEM_START 0x00000000 #define CHIPMEM_END 0x00200000 /* overridden by argv[1] */ #define SAVEKMSG_MAGIC1 0x53415645 /* 'SAVE' */ #define SAVEKMSG_MAGIC2 0x4B4D5347 /* 'KMSG' */ struct savekmsg { u_long magic1; /* SAVEKMSG_MAGIC1 */ u_long magic2; /* SAVEKMSG_MAGIC2 */ u_long magicptr; /* address of magic1 */ u_long size; char data[]; }; int main(int argc, char *argv[]) { u_long start = CHIPMEM_START, end = CHIPMEM_END, p; int found = 0; struct savekmsg *m = NULL; if (argc >= 2) end = strtoul(argv[1], NULL, 0); printf("Searching for SAVEKMSG magic...\n"); for (p = start; p <= end-sizeof(struct savekmsg); p += 4) { m = (struct savekmsg *)p; if ((m->magic1 == SAVEKMSG_MAGIC1) && (m->magic2 == SAVEKMSG_MAGIC2) && (m->magicptr == p)) { found = 1; break; } } if (!found) printf("Not found\n"); else { printf("Found %ld bytes at 0x%08lx\n", m->size, (u_long)&m->data); puts(">>>>>>>>>>>>>>>>>>>>"); fflush(stdout); write(1, &m->data, m->size); fflush(stdout); puts("<<<<<<<<<<<<<<<<<<<<"); } return(0); }
linux-master
arch/m68k/tools/amiga/dmesg.c
/* * linux/arch/m68k/atari/atasound.c * * ++Geert: Moved almost all stuff to linux/drivers/sound/ * * The author of atari_nosound, atari_mksound and atari_microwire_cmd is * unknown. (++roman: That's me... :-) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * * 1998-05-31 ++andreas: atari_mksound rewritten to always use the envelope, * no timer, atari_nosound removed. * */ #include <linux/sched.h> #include <linux/timer.h> #include <linux/major.h> #include <linux/fcntl.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/module.h> #include <asm/atarihw.h> #include <asm/irq.h> #include <asm/atariints.h> /* * stuff from the old atasound.c */ void atari_microwire_cmd (int cmd) { tt_microwire.mask = 0x7ff; tt_microwire.data = MW_LM1992_ADDR | cmd; /* Busy wait for data being completely sent :-( */ while( tt_microwire.mask != 0x7ff) ; } EXPORT_SYMBOL(atari_microwire_cmd); /* PSG base frequency */ #define PSG_FREQ 125000 /* PSG envelope base frequency times 10 */ #define PSG_ENV_FREQ_10 78125 void atari_mksound (unsigned int hz, unsigned int ticks) { /* Generates sound of some frequency for some number of clock ticks. */ unsigned long flags; unsigned char tmp; int period; local_irq_save(flags); /* Disable generator A in mixer control. */ sound_ym.rd_data_reg_sel = 7; tmp = sound_ym.rd_data_reg_sel; tmp |= 011; sound_ym.wd_data = tmp; if (hz) { /* Convert from frequency value to PSG period value (base frequency 125 kHz). */ period = PSG_FREQ / hz; if (period > 0xfff) period = 0xfff; /* Set generator A frequency to hz. */ sound_ym.rd_data_reg_sel = 0; sound_ym.wd_data = period & 0xff; sound_ym.rd_data_reg_sel = 1; sound_ym.wd_data = (period >> 8) & 0xf; if (ticks) { /* Set length of envelope (max 8 sec). 
*/ int length = (ticks * PSG_ENV_FREQ_10) / HZ / 10; if (length > 0xffff) length = 0xffff; sound_ym.rd_data_reg_sel = 11; sound_ym.wd_data = length & 0xff; sound_ym.rd_data_reg_sel = 12; sound_ym.wd_data = length >> 8; /* Envelope form: max -> min single. */ sound_ym.rd_data_reg_sel = 13; sound_ym.wd_data = 0; /* Use envelope for generator A. */ sound_ym.rd_data_reg_sel = 8; sound_ym.wd_data = 0x10; } else { /* Set generator A level to maximum, no envelope. */ sound_ym.rd_data_reg_sel = 8; sound_ym.wd_data = 15; } /* Turn on generator A in mixer control. */ sound_ym.rd_data_reg_sel = 7; tmp &= ~1; sound_ym.wd_data = tmp; } local_irq_restore(flags); }
linux-master
arch/m68k/atari/atasound.c
/* * arch/m68k/atari/ataints.c -- Atari Linux interrupt handling code * * 5/2/94 Roman Hodek: * Added support for TT interrupts; setup for TT SCU (may someone has * twiddled there and we won't get the right interrupts :-() * * Major change: The device-independent code in m68k/ints.c didn't know * about non-autovec ints yet. It hardcoded the number of possible ints to * 7 (IRQ1...IRQ7). But the Atari has lots of non-autovec ints! I made the * number of possible ints a constant defined in interrupt.h, which is * 47 for the Atari. So we can call request_irq() for all Atari interrupts * just the normal way. Additionally, all vectors >= 48 are initialized to * call trap() instead of inthandler(). This must be changed here, too. * * 1995-07-16 Lars Brinkhoff <[email protected]>: * Corrected a bug in atari_add_isr() which rejected all SCC * interrupt sources if there were no TT MFP! * * 12/13/95: New interface functions atari_level_triggered_int() and * atari_register_vme_int() as support for level triggered VME interrupts. * * 02/12/96: (Roman) * Total rewrite of Atari interrupt handling, for new scheme see comments * below. * * 1996-09-03 lars brinkhoff <[email protected]>: * Added new function atari_unregister_vme_int(), and * modified atari_register_vme_int() as well as IS_VALID_INTNO() * to work with it. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. 
* */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/module.h> #include <linux/irq.h> #include <asm/traps.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stdma.h> #include <asm/irq.h> #include <asm/entry.h> #include <asm/io.h> /* * Atari interrupt handling scheme: * -------------------------------- * * All interrupt source have an internal number (defined in * <asm/atariints.h>): Autovector interrupts are 1..7, then follow ST-MFP, * TT-MFP, SCC, and finally VME interrupts. Vector numbers for the latter can * be allocated by atari_register_vme_int(). */ /* * Bitmap for free interrupt vector numbers * (new vectors starting from 0x70 can be allocated by * atari_register_vme_int()) */ static int free_vme_vec_bitmap; /* GK: * HBL IRQ handler for Falcon. Nobody needs it :-) * ++andreas: raise ipl to disable further HBLANK interrupts. */ asmlinkage void falcon_hblhandler(void); asm(".text\n" __ALIGN_STR "\n\t" "falcon_hblhandler:\n\t" "orw #0x200,%sp@\n\t" /* set saved ipl to 2 */ "rte"); extern void atari_microwire_cmd(int cmd); static unsigned int atari_irq_startup(struct irq_data *data) { unsigned int irq = data->irq; m68k_irq_startup(data); atari_turnon_irq(irq); atari_enable_irq(irq); return 0; } static void atari_irq_shutdown(struct irq_data *data) { unsigned int irq = data->irq; atari_disable_irq(irq); atari_turnoff_irq(irq); m68k_irq_shutdown(data); if (irq == IRQ_AUTO_4) vectors[VEC_INT4] = falcon_hblhandler; } static void atari_irq_enable(struct irq_data *data) { atari_enable_irq(data->irq); } static void atari_irq_disable(struct irq_data *data) { atari_disable_irq(data->irq); } static struct irq_chip atari_irq_chip = { .name = "atari", .irq_startup = atari_irq_startup, .irq_shutdown = atari_irq_shutdown, .irq_enable = atari_irq_enable, .irq_disable = atari_irq_disable, }; /* * ST-MFP timer D chained interrupts - each driver gets its 
own timer * interrupt instance. */ struct mfptimerbase { volatile struct MFP *mfp; unsigned char mfp_mask, mfp_data; unsigned short int_mask; int handler_irq, mfptimer_irq, server_irq; char *name; } stmfp_base = { .mfp = &st_mfp, .int_mask = 0x0, .handler_irq = IRQ_MFP_TIMD, .mfptimer_irq = IRQ_MFP_TIMER1, .name = "MFP Timer D" }; static irqreturn_t mfp_timer_d_handler(int irq, void *dev_id) { struct mfptimerbase *base = dev_id; int mach_irq; unsigned char ints; mach_irq = base->mfptimer_irq; ints = base->int_mask; for (; ints; mach_irq++, ints >>= 1) { if (ints & 1) generic_handle_irq(mach_irq); } return IRQ_HANDLED; } static void atari_mfptimer_enable(struct irq_data *data) { int mfp_num = data->irq - IRQ_MFP_TIMER1; stmfp_base.int_mask |= 1 << mfp_num; atari_enable_irq(IRQ_MFP_TIMD); } static void atari_mfptimer_disable(struct irq_data *data) { int mfp_num = data->irq - IRQ_MFP_TIMER1; stmfp_base.int_mask &= ~(1 << mfp_num); if (!stmfp_base.int_mask) atari_disable_irq(IRQ_MFP_TIMD); } static struct irq_chip atari_mfptimer_chip = { .name = "timer_d", .irq_enable = atari_mfptimer_enable, .irq_disable = atari_mfptimer_disable, }; /* * EtherNAT CPLD interrupt handling * CPLD interrupt register is at phys. 0x80000023 * Need this mapped in at interrupt startup time * Possibly need this mapped on demand anyway - * EtherNAT USB driver needs to disable IRQ before * startup! 
*/ static unsigned char *enat_cpld; static unsigned int atari_ethernat_startup(struct irq_data *data) { int enat_num = 140 - data->irq + 1; m68k_irq_startup(data); /* * map CPLD interrupt register */ if (!enat_cpld) enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2); /* * do _not_ enable the USB chip interrupt here - causes interrupt storm * and triggers dead interrupt watchdog * Need to reset the USB chip to a sane state in early startup before * removing this hack */ if (enat_num == 1) *enat_cpld |= 1 << enat_num; return 0; } static void atari_ethernat_enable(struct irq_data *data) { int enat_num = 140 - data->irq + 1; /* * map CPLD interrupt register */ if (!enat_cpld) enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2); *enat_cpld |= 1 << enat_num; } static void atari_ethernat_disable(struct irq_data *data) { int enat_num = 140 - data->irq + 1; /* * map CPLD interrupt register */ if (!enat_cpld) enat_cpld = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0x2); *enat_cpld &= ~(1 << enat_num); } static void atari_ethernat_shutdown(struct irq_data *data) { int enat_num = 140 - data->irq + 1; if (enat_cpld) { *enat_cpld &= ~(1 << enat_num); iounmap(enat_cpld); enat_cpld = NULL; } } static struct irq_chip atari_ethernat_chip = { .name = "ethernat", .irq_startup = atari_ethernat_startup, .irq_shutdown = atari_ethernat_shutdown, .irq_enable = atari_ethernat_enable, .irq_disable = atari_ethernat_disable, }; /* * void atari_init_IRQ (void) * * Parameters: None * * Returns: Nothing * * This function should be called during kernel startup to initialize * the atari IRQ handling routines. 
*/ void __init atari_init_IRQ(void) { m68k_setup_user_interrupt(VEC_USER, NUM_ATARI_SOURCES - IRQ_USER); m68k_setup_irq_controller(&atari_irq_chip, handle_simple_irq, 1, NUM_ATARI_SOURCES - 1); /* Initialize the MFP(s) */ #ifdef ATARI_USE_SOFTWARE_EOI st_mfp.vec_adr = 0x48; /* Software EOI-Mode */ #else st_mfp.vec_adr = 0x40; /* Automatic EOI-Mode */ #endif st_mfp.int_en_a = 0x00; /* turn off MFP-Ints */ st_mfp.int_en_b = 0x00; st_mfp.int_mk_a = 0xff; /* no Masking */ st_mfp.int_mk_b = 0xff; if (ATARIHW_PRESENT(TT_MFP)) { #ifdef ATARI_USE_SOFTWARE_EOI tt_mfp.vec_adr = 0x58; /* Software EOI-Mode */ #else tt_mfp.vec_adr = 0x50; /* Automatic EOI-Mode */ #endif tt_mfp.int_en_a = 0x00; /* turn off MFP-Ints */ tt_mfp.int_en_b = 0x00; tt_mfp.int_mk_a = 0xff; /* no Masking */ tt_mfp.int_mk_b = 0xff; } if (ATARIHW_PRESENT(SCC) && !atari_SCC_reset_done) { atari_scc.cha_a_ctrl = 9; MFPDELAY(); atari_scc.cha_a_ctrl = (char) 0xc0; /* hardware reset */ } if (ATARIHW_PRESENT(SCU)) { /* init the SCU if present */ tt_scu.sys_mask = 0x10; /* enable VBL (for the cursor) and * disable HSYNC interrupts (who * needs them?) MFP and SCC are * enabled in VME mask */ tt_scu.vme_mask = 0x60; /* enable MFP and SCC ints */ } else { /* If no SCU and no Hades, the HSYNC interrupt needs to be * disabled this way. (Else _inthandler in kernel/sys_call.S * gets overruns) */ vectors[VEC_INT2] = falcon_hblhandler; vectors[VEC_INT4] = falcon_hblhandler; } if (ATARIHW_PRESENT(PCM_8BIT) && ATARIHW_PRESENT(MICROWIRE)) { /* Initialize the LM1992 Sound Controller to enable the PSG sound. This is misplaced here, it should be in an atasound_init(), that doesn't exist yet. 
*/ atari_microwire_cmd(MW_LM1992_PSG_HIGH); } stdma_init(); /* Initialize the PSG: all sounds off, both ports output */ sound_ym.rd_data_reg_sel = 7; sound_ym.wd_data = 0xff; m68k_setup_irq_controller(&atari_mfptimer_chip, handle_simple_irq, IRQ_MFP_TIMER1, 8); irq_set_status_flags(IRQ_MFP_TIMER1, IRQ_IS_POLLED); irq_set_status_flags(IRQ_MFP_TIMER2, IRQ_IS_POLLED); /* prepare timer D data for use as poll interrupt */ /* set Timer D data Register - needs to be > 0 */ st_mfp.tim_dt_d = 254; /* < 100 Hz */ /* start timer D, div = 1:100 */ st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 0xf0) | 0x6; /* request timer D dispatch handler */ if (request_irq(IRQ_MFP_TIMD, mfp_timer_d_handler, IRQF_SHARED, stmfp_base.name, &stmfp_base)) pr_err("Couldn't register %s interrupt\n", stmfp_base.name); /* * EtherNAT ethernet / USB interrupt handlers */ m68k_setup_irq_controller(&atari_ethernat_chip, handle_simple_irq, 139, 2); } /* * atari_register_vme_int() returns the number of a free interrupt vector for * hardware with a programmable int vector (probably a VME board). */ unsigned int atari_register_vme_int(void) { int i; for (i = 0; i < 32; i++) if ((free_vme_vec_bitmap & (1 << i)) == 0) break; if (i == 16) return 0; free_vme_vec_bitmap |= 1 << i; return VME_SOURCE_BASE + i; } EXPORT_SYMBOL(atari_register_vme_int); void atari_unregister_vme_int(unsigned int irq) { if (irq >= VME_SOURCE_BASE && irq < VME_SOURCE_BASE + VME_MAX_SOURCES) { irq -= VME_SOURCE_BASE; free_vme_vec_bitmap &= ~(1 << irq); } } EXPORT_SYMBOL(atari_unregister_vme_int);
linux-master
arch/m68k/atari/ataints.c
/* * Functions for ST-RAM allocations * * Copyright 1994-97 Roman Hodek <[email protected]> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/kdev_t.h> #include <linux/major.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/memblock.h> #include <linux/mount.h> #include <linux/blkdev.h> #include <linux/module.h> #include <linux/ioport.h> #include <asm/setup.h> #include <asm/machdep.h> #include <asm/page.h> #include <asm/atarihw.h> #include <asm/atari_stram.h> #include <asm/io.h> /* * The ST-RAM allocator allocates memory from a pool of reserved ST-RAM of * configurable size, set aside on ST-RAM init. * As long as this pool is not exhausted, allocation of real ST-RAM can be * guaranteed. */ /* set if kernel is in ST-RAM */ static int kernel_in_stram; static struct resource stram_pool = { .name = "ST-RAM Pool" }; static unsigned long pool_size = 1024*1024; static unsigned long stram_virt_offset; static int __init atari_stram_setup(char *arg) { if (!MACH_IS_ATARI) return 0; pool_size = memparse(arg, NULL); return 0; } early_param("stram_pool", atari_stram_setup); /* * This init function is called very early by atari/config.c * It initializes some internal variables needed for stram_alloc() */ void __init atari_stram_init(void) { int i; /* * determine whether kernel code resides in ST-RAM * (then ST-RAM is the first memory block at virtual 0x0) */ kernel_in_stram = (m68k_memory[0].addr == 0); for (i = 0; i < m68k_num_memory; ++i) { if (m68k_memory[i].addr == 0) { return; } } /* Should never come here! (There is always ST-Ram!) 
*/ panic("atari_stram_init: no ST-RAM found!"); } /* * This function is called from setup_arch() to reserve the pages needed for * ST-RAM management, if the kernel resides in ST-RAM. */ void __init atari_stram_reserve_pages(void *start_mem) { if (kernel_in_stram) { pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n"); stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size, PAGE_SIZE); if (!stram_pool.start) panic("%s: Failed to allocate %lu bytes align=%lx\n", __func__, pool_size, PAGE_SIZE); stram_pool.end = stram_pool.start + pool_size - 1; request_resource(&iomem_resource, &stram_pool); stram_virt_offset = 0; pr_debug("atari_stram pool: size = %lu bytes, resource = %pR\n", pool_size, &stram_pool); pr_debug("atari_stram pool: stram_virt_offset = %lx\n", stram_virt_offset); } } /* * This function is called as arch initcall to reserve the pages needed for * ST-RAM management, if the kernel does not reside in ST-RAM. */ int __init atari_stram_map_pages(void) { if (!kernel_in_stram) { /* * Skip page 0, as the fhe first 2 KiB are supervisor-only! 
*/ pr_debug("atari_stram pool: kernel not in ST-RAM, using ioremap!\n"); stram_pool.start = PAGE_SIZE; stram_pool.end = stram_pool.start + pool_size - 1; request_resource(&iomem_resource, &stram_pool); stram_virt_offset = (unsigned long) ioremap(stram_pool.start, resource_size(&stram_pool)) - stram_pool.start; pr_debug("atari_stram pool: size = %lu bytes, resource = %pR\n", pool_size, &stram_pool); pr_debug("atari_stram pool: stram_virt_offset = %lx\n", stram_virt_offset); } return 0; } arch_initcall(atari_stram_map_pages); void *atari_stram_to_virt(unsigned long phys) { return (void *)(phys + stram_virt_offset); } EXPORT_SYMBOL(atari_stram_to_virt); unsigned long atari_stram_to_phys(void *virt) { return (unsigned long)(virt - stram_virt_offset); } EXPORT_SYMBOL(atari_stram_to_phys); void *atari_stram_alloc(unsigned long size, const char *owner) { struct resource *res; int error; pr_debug("atari_stram_alloc: allocate %lu bytes\n", size); /* round up */ size = PAGE_ALIGN(size); res = kzalloc(sizeof(struct resource), GFP_KERNEL); if (!res) return NULL; res->name = owner; error = allocate_resource(&stram_pool, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL); if (error < 0) { pr_err("atari_stram_alloc: allocate_resource() failed %d!\n", error); kfree(res); return NULL; } pr_debug("atari_stram_alloc: returning %pR\n", res); return atari_stram_to_virt(res->start); } EXPORT_SYMBOL(atari_stram_alloc); void atari_stram_free(void *addr) { unsigned long start = atari_stram_to_phys(addr); struct resource *res; unsigned long size; res = lookup_resource(&stram_pool, start); if (!res) { pr_err("atari_stram_free: trying to free nonexistent region " "at %p\n", addr); return; } size = resource_size(res); pr_debug("atari_stram_free: free %lu bytes at %p\n", size, addr); release_resource(res); kfree(res); } EXPORT_SYMBOL(atari_stram_free);
linux-master
arch/m68k/atari/stram.c
/* * linux/arch/m68k/atari/stmda.c * * Copyright (C) 1994 Roman Hodek * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* This file contains some function for controlling the access to the */ /* ST-DMA chip that may be shared between devices. Currently we have: */ /* TT: Floppy and ACSI bus */ /* Falcon: Floppy and SCSI */ /* */ /* The controlling functions set up a wait queue for access to the */ /* ST-DMA chip. Callers to stdma_lock() that cannot granted access are */ /* put onto a queue and waked up later if the owner calls */ /* stdma_release(). Additionally, the caller gives his interrupt */ /* service routine to stdma_lock(). */ /* */ /* On the Falcon, the IDE bus uses just the ACSI/Floppy interrupt, but */ /* not the ST-DMA chip itself. So falhd.c needs not to lock the */ /* chip. The interrupt is routed to falhd.c if IDE is configured, the */ /* model is a Falcon and the interrupt was caused by the HD controller */ /* (can be determined by looking at its status register). 
*/ #include <linux/types.h> #include <linux/kdev_t.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/module.h> #include <asm/atari_stdma.h> #include <asm/atariints.h> #include <asm/atarihw.h> #include <asm/io.h> #include <asm/irq.h> static int stdma_locked; /* the semaphore */ /* int func to be called */ static irq_handler_t stdma_isr; static void *stdma_isr_data; /* data passed to isr */ static DECLARE_WAIT_QUEUE_HEAD(stdma_wait); /* wait queue for ST-DMA */ /***************************** Prototypes *****************************/ static irqreturn_t stdma_int (int irq, void *dummy); /************************* End of Prototypes **************************/ /** * stdma_try_lock - attempt to acquire ST DMA interrupt "lock" * @handler: interrupt handler to use after acquisition * * Returns !0 if lock was acquired; otherwise 0. */ int stdma_try_lock(irq_handler_t handler, void *data) { unsigned long flags; local_irq_save(flags); if (stdma_locked) { local_irq_restore(flags); return 0; } stdma_locked = 1; stdma_isr = handler; stdma_isr_data = data; local_irq_restore(flags); return 1; } EXPORT_SYMBOL(stdma_try_lock); /* * Function: void stdma_lock( isrfunc isr, void *data ) * * Purpose: Tries to get a lock on the ST-DMA chip that is used by more * then one device driver. Waits on stdma_wait until lock is free. * stdma_lock() may not be called from an interrupt! You have to * get the lock in your main routine and release it when your * request is finished. * * Inputs: A interrupt function that is called until the lock is * released. * * Returns: nothing * */ void stdma_lock(irq_handler_t handler, void *data) { /* Since the DMA is used for file system purposes, we have to sleep uninterruptible (there may be locked buffers) */ wait_event(stdma_wait, stdma_try_lock(handler, data)); } EXPORT_SYMBOL(stdma_lock); /* * Function: void stdma_release( void ) * * Purpose: Releases the lock on the ST-DMA chip. 
* * Inputs: none * * Returns: nothing * */ void stdma_release(void) { unsigned long flags; local_irq_save(flags); stdma_locked = 0; stdma_isr = NULL; stdma_isr_data = NULL; wake_up(&stdma_wait); local_irq_restore(flags); } EXPORT_SYMBOL(stdma_release); /** * stdma_is_locked_by - allow lock holder to check whether it needs to release. * @handler: interrupt handler previously used to acquire lock. * * Returns !0 if locked for the given handler; 0 otherwise. */ int stdma_is_locked_by(irq_handler_t handler) { unsigned long flags; int result; local_irq_save(flags); result = stdma_locked && (stdma_isr == handler); local_irq_restore(flags); return result; } EXPORT_SYMBOL(stdma_is_locked_by); /* * Function: int stdma_islocked( void ) * * Purpose: Check if the ST-DMA is currently locked. * Note: Returned status is only valid if ints are disabled while calling and * as long as they remain disabled. * If called with ints enabled, status can change only from locked to * unlocked, because ints may not lock the ST-DMA. * * Inputs: none * * Returns: != 0 if locked, 0 otherwise * */ int stdma_islocked(void) { return stdma_locked; } EXPORT_SYMBOL(stdma_islocked); /* * Function: void stdma_init( void ) * * Purpose: Initialize the ST-DMA chip access controlling. * It sets up the interrupt and its service routine. The int is registered * as slow int, client devices have to live with that (no problem * currently). * * Inputs: none * * Return: nothing * */ void __init stdma_init(void) { stdma_isr = NULL; if (request_irq(IRQ_MFP_FDC, stdma_int, IRQF_SHARED, "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int)) pr_err("Couldn't register ST-DMA interrupt\n"); } /* * Function: void stdma_int() * * Purpose: The interrupt routine for the ST-DMA. It calls the isr * registered by stdma_lock(). * */ static irqreturn_t stdma_int(int irq, void *dummy) { if (stdma_isr) (*stdma_isr)(irq, stdma_isr_data); return IRQ_HANDLED; }
linux-master
arch/m68k/atari/stdma.c
/* * linux/arch/m68k/atari/debug.c * * Atari debugging and serial console stuff * * Assembled of parts of former atari/config.c 97-12-18 by Roman Hodek * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/types.h> #include <linux/tty.h> #include <linux/console.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/module.h> #include <asm/atarihw.h> #include <asm/atariints.h> /* Can be set somewhere, if a SCC master reset has already be done and should * not be repeated; used by kgdb */ int atari_SCC_reset_done; EXPORT_SYMBOL(atari_SCC_reset_done); static struct console atari_console_driver = { .name = "debug", .flags = CON_PRINTBUFFER, .index = -1, }; static inline void ata_mfp_out(char c) { while (!(st_mfp.trn_stat & 0x80)) /* wait for tx buf empty */ barrier(); st_mfp.usart_dta = c; } static void atari_mfp_console_write(struct console *co, const char *str, unsigned int count) { while (count--) { if (*str == '\n') ata_mfp_out('\r'); ata_mfp_out(*str++); } } static inline void ata_scc_out(char c) { do { MFPDELAY(); } while (!(atari_scc.cha_b_ctrl & 0x04)); /* wait for tx buf empty */ MFPDELAY(); atari_scc.cha_b_data = c; } static void atari_scc_console_write(struct console *co, const char *str, unsigned int count) { while (count--) { if (*str == '\n') ata_scc_out('\r'); ata_scc_out(*str++); } } static inline void ata_midi_out(char c) { while (!(acia.mid_ctrl & ACIA_TDRE)) /* wait for tx buf empty */ barrier(); acia.mid_data = c; } static void atari_midi_console_write(struct console *co, const char *str, unsigned int count) { while (count--) { if (*str == '\n') ata_midi_out('\r'); ata_midi_out(*str++); } } static int ata_par_out(char c) { unsigned char tmp; /* This a some-seconds timeout in case no printer is connected */ unsigned long i = loops_per_jiffy > 1 ? 
loops_per_jiffy : 10000000/HZ; while ((st_mfp.par_dt_reg & 1) && --i) /* wait for BUSY == L */ ; if (!i) return 0; sound_ym.rd_data_reg_sel = 15; /* select port B */ sound_ym.wd_data = c; /* put char onto port */ sound_ym.rd_data_reg_sel = 14; /* select port A */ tmp = sound_ym.rd_data_reg_sel; sound_ym.wd_data = tmp & ~0x20; /* set strobe L */ MFPDELAY(); /* wait a bit */ sound_ym.wd_data = tmp | 0x20; /* set strobe H */ return 1; } static void atari_par_console_write(struct console *co, const char *str, unsigned int count) { static int printer_present = 1; if (!printer_present) return; while (count--) { if (*str == '\n') { if (!ata_par_out('\r')) { printer_present = 0; return; } } if (!ata_par_out(*str++)) { printer_present = 0; return; } } } #if 0 int atari_mfp_console_wait_key(struct console *co) { while (!(st_mfp.rcv_stat & 0x80)) /* wait for rx buf filled */ barrier(); return st_mfp.usart_dta; } int atari_scc_console_wait_key(struct console *co) { do { MFPDELAY(); } while (!(atari_scc.cha_b_ctrl & 0x01)); /* wait for rx buf filled */ MFPDELAY(); return atari_scc.cha_b_data; } int atari_midi_console_wait_key(struct console *co) { while (!(acia.mid_ctrl & ACIA_RDRF)) /* wait for rx buf filled */ barrier(); return acia.mid_data; } #endif /* * The following two functions do a quick'n'dirty initialization of the MFP or * SCC serial ports. They're used by the debugging interface, kgdb, and the * serial console code. */ static void __init atari_init_mfp_port(int cflag) { /* * timer values for 1200...115200 bps; > 38400 select 110, 134, or 150 * bps, resp., and work only correct if there's a RSVE or RSSPEED */ static int baud_table[9] = { 16, 11, 8, 4, 2, 1, 175, 143, 128 }; int baud = cflag & CBAUD; int parity = (cflag & PARENB) ? ((cflag & PARODD) ? 0x04 : 0x06) : 0; int csize = ((cflag & CSIZE) == CS7) ? 
0x20 : 0x00; if (cflag & CBAUDEX) baud += B38400; if (baud < B1200 || baud > B38400+2) baud = B9600; /* use default 9600bps for non-implemented rates */ baud -= B1200; /* baud_table[] starts at 1200bps */ st_mfp.trn_stat &= ~0x01; /* disable TX */ st_mfp.usart_ctr = parity | csize | 0x88; /* 1:16 clk mode, 1 stop bit */ st_mfp.tim_ct_cd &= 0x70; /* stop timer D */ st_mfp.tim_dt_d = baud_table[baud]; st_mfp.tim_ct_cd |= 0x01; /* start timer D, 1:4 */ st_mfp.trn_stat |= 0x01; /* enable TX */ } #define SCC_WRITE(reg, val) \ do { \ atari_scc.cha_b_ctrl = (reg); \ MFPDELAY(); \ atari_scc.cha_b_ctrl = (val); \ MFPDELAY(); \ } while (0) /* loops_per_jiffy isn't initialized yet, so we can't use udelay(). This does a * delay of ~ 60us. */ #define LONG_DELAY() \ do { \ int i; \ for (i = 100; i > 0; --i) \ MFPDELAY(); \ } while (0) static void __init atari_init_scc_port(int cflag) { static int clksrc_table[9] = /* reg 11: 0x50 = BRG, 0x00 = RTxC, 0x28 = TRxC */ { 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x00, 0x00 }; static int brgsrc_table[9] = /* reg 14: 0 = RTxC, 2 = PCLK */ { 2, 2, 2, 2, 2, 2, 0, 2, 2 }; static int clkmode_table[9] = /* reg 4: 0x40 = x16, 0x80 = x32, 0xc0 = x64 */ { 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0xc0, 0x80 }; static int div_table[9] = /* reg12 (BRG low) */ { 208, 138, 103, 50, 24, 11, 1, 0, 0 }; int baud = cflag & CBAUD; int clksrc, clkmode, div, reg3, reg5; if (cflag & CBAUDEX) baud += B38400; if (baud < B1200 || baud > B38400+2) baud = B9600; /* use default 9600bps for non-implemented rates */ baud -= B1200; /* tables starts at 1200bps */ clksrc = clksrc_table[baud]; clkmode = clkmode_table[baud]; div = div_table[baud]; if (ATARIHW_PRESENT(TT_MFP) && baud >= 6) { /* special treatment for TT, where rates >= 38400 are done via TRxC */ clksrc = 0x28; /* TRxC */ clkmode = baud == 6 ? 0xc0 : baud == 7 ? 0x80 : /* really 76800bps */ 0x40; /* really 153600bps */ div = 0; } reg3 = (cflag & CSIZE) == CS8 ? 
0xc0 : 0x40; reg5 = (cflag & CSIZE) == CS8 ? 0x60 : 0x20 | 0x82 /* assert DTR/RTS */; (void)atari_scc.cha_b_ctrl; /* reset reg pointer */ SCC_WRITE(9, 0xc0); /* reset */ LONG_DELAY(); /* extra delay after WR9 access */ SCC_WRITE(4, (cflag & PARENB) ? ((cflag & PARODD) ? 0x01 : 0x03) : 0 | 0x04 /* 1 stopbit */ | clkmode); SCC_WRITE(3, reg3); SCC_WRITE(5, reg5); SCC_WRITE(9, 0); /* no interrupts */ LONG_DELAY(); /* extra delay after WR9 access */ SCC_WRITE(10, 0); /* NRZ mode */ SCC_WRITE(11, clksrc); /* main clock source */ SCC_WRITE(12, div); /* BRG value */ SCC_WRITE(13, 0); /* BRG high byte */ SCC_WRITE(14, brgsrc_table[baud]); SCC_WRITE(14, brgsrc_table[baud] | (div ? 1 : 0)); SCC_WRITE(3, reg3 | 1); SCC_WRITE(5, reg5 | 8); atari_SCC_reset_done = 1; } static void __init atari_init_midi_port(int cflag) { int baud = cflag & CBAUD; int csize = ((cflag & CSIZE) == CS8) ? 0x10 : 0x00; /* warning 7N1 isn't possible! (instead 7O2 is used...) */ int parity = (cflag & PARENB) ? ((cflag & PARODD) ? 0x0c : 0x08) : 0x04; int div; /* 4800 selects 7812.5, 115200 selects 500000, all other (incl. 9600 as * default) the standard MIDI speed 31250. */ if (cflag & CBAUDEX) baud += B38400; if (baud == B4800) div = ACIA_DIV64; /* really 7812.5 bps */ else if (baud == B38400+2 /* 115200 */) div = ACIA_DIV1; /* really 500 kbps (does that work??) */ else div = ACIA_DIV16; /* 31250 bps, standard for MIDI */ /* RTS low, ints disabled */ acia.mid_ctrl = div | csize | parity | ((atari_switches & ATARI_SWITCH_MIDI) ? ACIA_RHTID : ACIA_RLTID); } static int __init atari_debug_setup(char *arg) { bool registered; if (!MACH_IS_ATARI) return 0; if (!strcmp(arg, "ser")) /* defaults to ser2 for a Falcon and ser1 otherwise */ arg = MACH_IS_FALCON ? 
"ser2" : "ser1"; registered = !!atari_console_driver.write; if (!strcmp(arg, "ser1")) { /* ST-MFP Modem1 serial port */ atari_init_mfp_port(B9600|CS8); atari_console_driver.write = atari_mfp_console_write; } else if (!strcmp(arg, "ser2")) { /* SCC Modem2 serial port */ atari_init_scc_port(B9600|CS8); atari_console_driver.write = atari_scc_console_write; } else if (!strcmp(arg, "midi")) { /* MIDI port */ atari_init_midi_port(B9600|CS8); atari_console_driver.write = atari_midi_console_write; } else if (!strcmp(arg, "par")) { /* parallel printer */ atari_turnoff_irq(IRQ_MFP_BUSY); /* avoid ints */ sound_ym.rd_data_reg_sel = 7; /* select mixer control */ sound_ym.wd_data = 0xff; /* sound off, ports are output */ sound_ym.rd_data_reg_sel = 15; /* select port B */ sound_ym.wd_data = 0; /* no char */ sound_ym.rd_data_reg_sel = 14; /* select port A */ sound_ym.wd_data = sound_ym.rd_data_reg_sel | 0x20; /* strobe H */ atari_console_driver.write = atari_par_console_write; } if (atari_console_driver.write && !registered) register_console(&atari_console_driver); return 0; } early_param("debug", atari_debug_setup);
linux-master
arch/m68k/atari/debug.c
/* * linux/arch/m68k/atari/time.c * * Atari time and real time clock stuff * * Assembled of parts of former atari/config.c 97-12-18 by Roman Hodek * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/types.h> #include <linux/mc146818rtc.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/rtc.h> #include <linux/bcd.h> #include <linux/clocksource.h> #include <linux/delay.h> #include <linux/export.h> #include <asm/atariints.h> #include <asm/machdep.h> DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL_GPL(rtc_lock); static u64 atari_read_clk(struct clocksource *cs); static struct clocksource atari_clk = { .name = "mfp", .rating = 100, .read = atari_read_clk, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static u32 clk_total; static u8 last_timer_count; static irqreturn_t mfp_timer_c_handler(int irq, void *dev_id) { unsigned long flags; local_irq_save(flags); do { last_timer_count = st_mfp.tim_dt_c; } while (last_timer_count == 1); clk_total += INT_TICKS; legacy_timer_tick(1); timer_heartbeat(); local_irq_restore(flags); return IRQ_HANDLED; } void __init atari_sched_init(void) { /* set Timer C data Register */ st_mfp.tim_dt_c = INT_TICKS; /* start timer C, div = 1:100 */ st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60; /* install interrupt service routine for MFP Timer C */ if (request_irq(IRQ_MFP_TIMC, mfp_timer_c_handler, IRQF_TIMER, "timer", NULL)) pr_err("Couldn't register timer interrupt\n"); clocksource_register_hz(&atari_clk, INT_CLK); } /* ++andreas: gettimeoffset fixed to check for pending interrupt */ static u64 atari_read_clk(struct clocksource *cs) { unsigned long flags; u8 count; u32 ticks; local_irq_save(flags); /* Ensure that the count is monotonically decreasing, even though * the result may briefly stop changing after counter wrap-around. 
*/ count = min(st_mfp.tim_dt_c, last_timer_count); last_timer_count = count; ticks = INT_TICKS - count; ticks += clk_total; local_irq_restore(flags); return ticks; } static void mste_read(struct MSTE_RTC *val) { #define COPY(v) val->v=(mste_rtc.v & 0xf) do { COPY(sec_ones) ; COPY(sec_tens) ; COPY(min_ones) ; COPY(min_tens) ; COPY(hr_ones) ; COPY(hr_tens) ; COPY(weekday) ; COPY(day_ones) ; COPY(day_tens) ; COPY(mon_ones) ; COPY(mon_tens) ; COPY(year_ones) ; COPY(year_tens) ; /* prevent from reading the clock while it changed */ } while (val->sec_ones != (mste_rtc.sec_ones & 0xf)); #undef COPY } static void mste_write(struct MSTE_RTC *val) { #define COPY(v) mste_rtc.v=val->v do { COPY(sec_ones) ; COPY(sec_tens) ; COPY(min_ones) ; COPY(min_tens) ; COPY(hr_ones) ; COPY(hr_tens) ; COPY(weekday) ; COPY(day_ones) ; COPY(day_tens) ; COPY(mon_ones) ; COPY(mon_tens) ; COPY(year_ones) ; COPY(year_tens) ; /* prevent from writing the clock while it changed */ } while (val->sec_ones != (mste_rtc.sec_ones & 0xf)); #undef COPY } #define RTC_READ(reg) \ ({ unsigned char __val; \ (void) atari_writeb(reg,&tt_rtc.regsel); \ __val = tt_rtc.data; \ __val; \ }) #define RTC_WRITE(reg,val) \ do { \ atari_writeb(reg,&tt_rtc.regsel); \ tt_rtc.data = (val); \ } while(0) #define HWCLK_POLL_INTERVAL 5 int atari_mste_hwclk( int op, struct rtc_time *t ) { int hour, year; int hr24=0; struct MSTE_RTC val; mste_rtc.mode=(mste_rtc.mode | 1); hr24=mste_rtc.mon_tens & 1; mste_rtc.mode=(mste_rtc.mode & ~1); if (op) { /* write: prepare values */ val.sec_ones = t->tm_sec % 10; val.sec_tens = t->tm_sec / 10; val.min_ones = t->tm_min % 10; val.min_tens = t->tm_min / 10; hour = t->tm_hour; if (!hr24) { if (hour > 11) hour += 20 - 12; if (hour == 0 || hour == 20) hour += 12; } val.hr_ones = hour % 10; val.hr_tens = hour / 10; val.day_ones = t->tm_mday % 10; val.day_tens = t->tm_mday / 10; val.mon_ones = (t->tm_mon+1) % 10; val.mon_tens = (t->tm_mon+1) / 10; year = t->tm_year - 80; val.year_ones = year % 10; 
val.year_tens = year / 10; val.weekday = t->tm_wday; mste_write(&val); mste_rtc.mode=(mste_rtc.mode | 1); val.year_ones = (year % 4); /* leap year register */ mste_rtc.mode=(mste_rtc.mode & ~1); } else { mste_read(&val); t->tm_sec = val.sec_ones + val.sec_tens * 10; t->tm_min = val.min_ones + val.min_tens * 10; hour = val.hr_ones + val.hr_tens * 10; if (!hr24) { if (hour == 12 || hour == 12 + 20) hour -= 12; if (hour >= 20) hour += 12 - 20; } t->tm_hour = hour; t->tm_mday = val.day_ones + val.day_tens * 10; t->tm_mon = val.mon_ones + val.mon_tens * 10 - 1; t->tm_year = val.year_ones + val.year_tens * 10 + 80; t->tm_wday = val.weekday; } return 0; } int atari_tt_hwclk( int op, struct rtc_time *t ) { int sec=0, min=0, hour=0, day=0, mon=0, year=0, wday=0; unsigned long flags; unsigned char ctrl; int pm = 0; ctrl = RTC_READ(RTC_CONTROL); /* control registers are * independent from the UIP */ if (op) { /* write: prepare values */ sec = t->tm_sec; min = t->tm_min; hour = t->tm_hour; day = t->tm_mday; mon = t->tm_mon + 1; year = t->tm_year - atari_rtc_year_offset; wday = t->tm_wday + (t->tm_wday >= 0); if (!(ctrl & RTC_24H)) { if (hour > 11) { pm = 0x80; if (hour != 12) hour -= 12; } else if (hour == 0) hour = 12; } if (!(ctrl & RTC_DM_BINARY)) { sec = bin2bcd(sec); min = bin2bcd(min); hour = bin2bcd(hour); day = bin2bcd(day); mon = bin2bcd(mon); year = bin2bcd(year); if (wday >= 0) wday = bin2bcd(wday); } } /* Reading/writing the clock registers is a bit critical due to * the regular update cycle of the RTC. While an update is in * progress, registers 0..9 shouldn't be touched. * The problem is solved like that: If an update is currently in * progress (the UIP bit is set), the process sleeps for a while * (50ms). This really should be enough, since the update cycle * normally needs 2 ms. * If the UIP bit reads as 0, we have at least 244 usecs until the * update starts. This should be enough... 
But to be sure, * additionally the RTC_SET bit is set to prevent an update cycle. */ while( RTC_READ(RTC_FREQ_SELECT) & RTC_UIP ) { if (in_atomic() || irqs_disabled()) mdelay(1); else schedule_timeout_interruptible(HWCLK_POLL_INTERVAL); } local_irq_save(flags); RTC_WRITE( RTC_CONTROL, ctrl | RTC_SET ); if (!op) { sec = RTC_READ( RTC_SECONDS ); min = RTC_READ( RTC_MINUTES ); hour = RTC_READ( RTC_HOURS ); day = RTC_READ( RTC_DAY_OF_MONTH ); mon = RTC_READ( RTC_MONTH ); year = RTC_READ( RTC_YEAR ); wday = RTC_READ( RTC_DAY_OF_WEEK ); } else { RTC_WRITE( RTC_SECONDS, sec ); RTC_WRITE( RTC_MINUTES, min ); RTC_WRITE( RTC_HOURS, hour + pm); RTC_WRITE( RTC_DAY_OF_MONTH, day ); RTC_WRITE( RTC_MONTH, mon ); RTC_WRITE( RTC_YEAR, year ); if (wday >= 0) RTC_WRITE( RTC_DAY_OF_WEEK, wday ); } RTC_WRITE( RTC_CONTROL, ctrl & ~RTC_SET ); local_irq_restore(flags); if (!op) { /* read: adjust values */ if (hour & 0x80) { hour &= ~0x80; pm = 1; } if (!(ctrl & RTC_DM_BINARY)) { sec = bcd2bin(sec); min = bcd2bin(min); hour = bcd2bin(hour); day = bcd2bin(day); mon = bcd2bin(mon); year = bcd2bin(year); wday = bcd2bin(wday); } if (!(ctrl & RTC_24H)) { if (!pm && hour == 12) hour = 0; else if (pm && hour != 12) hour += 12; } t->tm_sec = sec; t->tm_min = min; t->tm_hour = hour; t->tm_mday = day; t->tm_mon = mon - 1; t->tm_year = year + atari_rtc_year_offset; t->tm_wday = wday - 1; } return( 0 ); }
linux-master
arch/m68k/atari/time.c
// SPDX-License-Identifier: GPL-2.0+ /* * CMOS/NV-RAM driver for Atari. Adapted from drivers/char/nvram.c. * Copyright (C) 1997 Roman Hodek <[email protected]> * idea by and with help from Richard Jelinek <[email protected]> * Portions copyright (c) 2001,2002 Sun Microsystems ([email protected]) * Further contributions from Cesar Barros, Erik Gilling, Tim Hockin and * Wim Van Sebroeck. */ #include <linux/errno.h> #include <linux/init.h> #include <linux/mc146818rtc.h> #include <linux/module.h> #include <linux/nvram.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/types.h> #include <asm/atarihw.h> #include <asm/atariints.h> #define NVRAM_BYTES 50 /* It is worth noting that these functions all access bytes of general * purpose memory in the NVRAM - that is to say, they all add the * NVRAM_FIRST_BYTE offset. Pass them offsets into NVRAM as if you did not * know about the RTC cruft. */ /* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with * rtc_lock held. Due to the index-port/data-port design of the RTC, we * don't want two different things trying to get to it at once. (e.g. the * periodic 11 min sync from kernel/time/ntp.c vs. this driver.) */ static unsigned char __nvram_read_byte(int i) { return CMOS_READ(NVRAM_FIRST_BYTE + i); } /* This races nicely with trying to read with checksum checking */ static void __nvram_write_byte(unsigned char c, int i) { CMOS_WRITE(c, NVRAM_FIRST_BYTE + i); } /* On Ataris, the checksum is over all bytes except the checksum bytes * themselves; these are at the very end. 
*/ #define ATARI_CKS_RANGE_START 0 #define ATARI_CKS_RANGE_END 47 #define ATARI_CKS_LOC 48 static int __nvram_check_checksum(void) { int i; unsigned char sum = 0; for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) sum += __nvram_read_byte(i); return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) && (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff)); } static void __nvram_set_checksum(void) { int i; unsigned char sum = 0; for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) sum += __nvram_read_byte(i); __nvram_write_byte(~sum, ATARI_CKS_LOC); __nvram_write_byte(sum, ATARI_CKS_LOC + 1); } long atari_nvram_set_checksum(void) { spin_lock_irq(&rtc_lock); __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); return 0; } long atari_nvram_initialize(void) { loff_t i; spin_lock_irq(&rtc_lock); for (i = 0; i < NVRAM_BYTES; ++i) __nvram_write_byte(0, i); __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); return 0; } ssize_t atari_nvram_read(char *buf, size_t count, loff_t *ppos) { char *p = buf; loff_t i; spin_lock_irq(&rtc_lock); if (!__nvram_check_checksum()) { spin_unlock_irq(&rtc_lock); return -EIO; } for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) *p = __nvram_read_byte(i); spin_unlock_irq(&rtc_lock); *ppos = i; return p - buf; } ssize_t atari_nvram_write(char *buf, size_t count, loff_t *ppos) { char *p = buf; loff_t i; spin_lock_irq(&rtc_lock); if (!__nvram_check_checksum()) { spin_unlock_irq(&rtc_lock); return -EIO; } for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) __nvram_write_byte(*p, i); __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); *ppos = i; return p - buf; } ssize_t atari_nvram_get_size(void) { return NVRAM_BYTES; } #ifdef CONFIG_PROC_FS static struct { unsigned char val; const char *name; } boot_prefs[] = { { 0x80, "TOS" }, { 0x40, "ASV" }, { 0x20, "NetBSD (?)" }, { 0x10, "Linux" }, { 0x00, "unspecified" }, }; static const char * const languages[] = { "English (US)", "German", "French", 
"English (UK)", "Spanish", "Italian", "6 (undefined)", "Swiss (French)", "Swiss (German)", }; static const char * const dateformat[] = { "MM%cDD%cYY", "DD%cMM%cYY", "YY%cMM%cDD", "YY%cDD%cMM", "4 (undefined)", "5 (undefined)", "6 (undefined)", "7 (undefined)", }; static const char * const colors[] = { "2", "4", "16", "256", "65536", "??", "??", "??" }; static void atari_nvram_proc_read(unsigned char *nvram, struct seq_file *seq, void *offset) { int checksum; int i; unsigned int vmode; spin_lock_irq(&rtc_lock); checksum = __nvram_check_checksum(); spin_unlock_irq(&rtc_lock); seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not "); seq_puts(seq, "Boot preference : "); for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) if (nvram[1] == boot_prefs[i].val) { seq_printf(seq, "%s\n", boot_prefs[i].name); break; } if (i < 0) seq_printf(seq, "0x%02x (undefined)\n", nvram[1]); seq_printf(seq, "SCSI arbitration : %s\n", (nvram[16] & 0x80) ? "on" : "off"); seq_puts(seq, "SCSI host ID : "); if (nvram[16] & 0x80) seq_printf(seq, "%d\n", nvram[16] & 7); else seq_puts(seq, "n/a\n"); if (!MACH_IS_FALCON) return; seq_puts(seq, "OS language : "); if (nvram[6] < ARRAY_SIZE(languages)) seq_printf(seq, "%s\n", languages[nvram[6]]); else seq_printf(seq, "%u (undefined)\n", nvram[6]); seq_puts(seq, "Keyboard language: "); if (nvram[7] < ARRAY_SIZE(languages)) seq_printf(seq, "%s\n", languages[nvram[7]]); else seq_printf(seq, "%u (undefined)\n", nvram[7]); seq_puts(seq, "Date format : "); seq_printf(seq, dateformat[nvram[8] & 7], nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/'); seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12); seq_puts(seq, "Boot delay : "); if (nvram[10] == 0) seq_puts(seq, "default\n"); else seq_printf(seq, "%ds%s\n", nvram[10], nvram[10] < 8 ? ", no memory test" : ""); vmode = (nvram[14] << 8) | nvram[15]; seq_printf(seq, "Video mode : %s colors, %d columns, %s %s monitor\n", colors[vmode & 7], vmode & 8 ? 80 : 40, vmode & 16 ? 
"VGA" : "TV", vmode & 32 ? "PAL" : "NTSC"); seq_printf(seq, " %soverscan, compat. mode %s%s\n", vmode & 64 ? "" : "no ", vmode & 128 ? "on" : "off", vmode & 256 ? (vmode & 16 ? ", line doubling" : ", half screen") : ""); } static int nvram_proc_read(struct seq_file *seq, void *offset) { unsigned char contents[NVRAM_BYTES]; int i; spin_lock_irq(&rtc_lock); for (i = 0; i < NVRAM_BYTES; ++i) contents[i] = __nvram_read_byte(i); spin_unlock_irq(&rtc_lock); atari_nvram_proc_read(contents, seq, offset); return 0; } static int __init atari_nvram_init(void) { if (!(MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK))) return -ENODEV; if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) { pr_err("nvram: can't create /proc/driver/nvram\n"); return -ENOMEM; } return 0; } device_initcall(atari_nvram_init); #endif /* CONFIG_PROC_FS */
linux-master
arch/m68k/atari/nvram.c
/* * linux/arch/m68k/atari/config.c * * Copyright (C) 1994 Bjoern Brauel * * 5/2/94 Roman Hodek: * Added setting of time_adj to get a better clock. * * 5/14/94 Roman Hodek: * gettod() for TT * * 5/15/94 Roman Hodek: * hard_reset_now() for Atari (and others?) * * 94/12/30 Andreas Schwab: * atari_sched_init fixed to get precise clock. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* * Miscellaneous atari stuff */ #include <linux/types.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/console.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/usb/isp116x.h> #include <linux/vt_kern.h> #include <linux/module.h> #include <asm/bootinfo.h> #include <asm/bootinfo-atari.h> #include <asm/byteorder.h> #include <asm/setup.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stram.h> #include <asm/machdep.h> #include <asm/hwtest.h> #include <asm/io.h> #include <asm/config.h> u_long atari_mch_cookie; EXPORT_SYMBOL(atari_mch_cookie); u_long atari_mch_type; EXPORT_SYMBOL(atari_mch_type); struct atari_hw_present atari_hw_present; EXPORT_SYMBOL(atari_hw_present); u_long atari_switches; EXPORT_SYMBOL(atari_switches); int atari_dont_touch_floppy_select; EXPORT_SYMBOL(atari_dont_touch_floppy_select); int atari_rtc_year_offset; /* local function prototypes */ static void atari_reset(void); static void atari_get_model(char *model); static void atari_get_hardware_list(struct seq_file *m); /* atari specific irq functions */ extern void atari_init_IRQ (void); extern void atari_mksound(unsigned int count, unsigned int ticks); #ifdef CONFIG_HEARTBEAT static void atari_heartbeat(int on); #endif /* atari specific timer functions (in time.c) */ extern void atari_sched_init(void); extern int atari_mste_hwclk (int, struct rtc_time *); extern int 
atari_tt_hwclk (int, struct rtc_time *); /* ++roman: This is a more elaborate test for an SCC chip, since the plain * Medusa board generates DTACK at the SCC's standard addresses, but a SCC * board in the Medusa is possible. Also, the addresses where the ST_ESCC * resides generate DTACK without the chip, too. * The method is to write values into the interrupt vector register, that * should be readable without trouble (from channel A!). */ static int __init scc_test(volatile char *ctla) { if (!hwreg_present(ctla)) return 0; MFPDELAY(); *ctla = 2; MFPDELAY(); *ctla = 0x40; MFPDELAY(); *ctla = 2; MFPDELAY(); if (*ctla != 0x40) return 0; MFPDELAY(); *ctla = 2; MFPDELAY(); *ctla = 0x60; MFPDELAY(); *ctla = 2; MFPDELAY(); if (*ctla != 0x60) return 0; return 1; } /* * Parse an Atari-specific record in the bootinfo */ int __init atari_parse_bootinfo(const struct bi_record *record) { int unknown = 0; const void *data = record->data; switch (be16_to_cpu(record->tag)) { case BI_ATARI_MCH_COOKIE: atari_mch_cookie = be32_to_cpup(data); break; case BI_ATARI_MCH_TYPE: atari_mch_type = be32_to_cpup(data); break; default: unknown = 1; break; } return unknown; } /* Parse the Atari-specific switches= option. */ static int __init atari_switches_setup(char *str) { char switches[COMMAND_LINE_SIZE]; char *p; int ovsc_shift; char *args = switches; if (!MACH_IS_ATARI) return 0; /* copy string to local array, strsep works destructively... 
*/ strcpy(switches, str); atari_switches = 0; /* parse the options */ while ((p = strsep(&args, ",")) != NULL) { if (!*p) continue; ovsc_shift = 0; if (strncmp(p, "ov_", 3) == 0) { p += 3; ovsc_shift = ATARI_SWITCH_OVSC_SHIFT; } if (strcmp(p, "ikbd") == 0) { /* RTS line of IKBD ACIA */ atari_switches |= ATARI_SWITCH_IKBD << ovsc_shift; } else if (strcmp(p, "midi") == 0) { /* RTS line of MIDI ACIA */ atari_switches |= ATARI_SWITCH_MIDI << ovsc_shift; } else if (strcmp(p, "snd6") == 0) { atari_switches |= ATARI_SWITCH_SND6 << ovsc_shift; } else if (strcmp(p, "snd7") == 0) { atari_switches |= ATARI_SWITCH_SND7 << ovsc_shift; } } return 0; } early_param("switches", atari_switches_setup); /* * Setup the Atari configuration info */ void __init config_atari(void) { unsigned short tos_version; memset(&atari_hw_present, 0, sizeof(atari_hw_present)); /* Change size of I/O space from 64KB to 4GB. */ ioport_resource.end = 0xFFFFFFFF; mach_sched_init = atari_sched_init; mach_init_IRQ = atari_init_IRQ; mach_get_model = atari_get_model; mach_get_hardware_list = atari_get_hardware_list; mach_reset = atari_reset; #if IS_ENABLED(CONFIG_INPUT_M68K_BEEP) mach_beep = atari_mksound; #endif #ifdef CONFIG_HEARTBEAT mach_heartbeat = atari_heartbeat; #endif /* Set switches as requested by the user */ if (atari_switches & ATARI_SWITCH_IKBD) acia.key_ctrl = ACIA_DIV64 | ACIA_D8N1S | ACIA_RHTID; if (atari_switches & ATARI_SWITCH_MIDI) acia.mid_ctrl = ACIA_DIV16 | ACIA_D8N1S | ACIA_RHTID; if (atari_switches & (ATARI_SWITCH_SND6|ATARI_SWITCH_SND7)) { sound_ym.rd_data_reg_sel = 14; sound_ym.wd_data = sound_ym.rd_data_reg_sel | ((atari_switches&ATARI_SWITCH_SND6) ? 0x40 : 0) | ((atari_switches&ATARI_SWITCH_SND7) ? 0x80 : 0); } /* ++bjoern: * Determine hardware present */ pr_info("Atari hardware found:"); if (MACH_IS_MEDUSA) { /* There's no Atari video hardware on the Medusa, but all the * addresses below generate a DTACK so no bus error occurs! 
*/ } else if (hwreg_present(f030_xreg)) { ATARIHW_SET(VIDEL_SHIFTER); pr_cont(" VIDEL"); /* This is a temporary hack: If there is Falcon video * hardware, we assume that the ST-DMA serves SCSI instead of * ACSI. In the future, there should be a better method for * this... */ ATARIHW_SET(ST_SCSI); pr_cont(" STDMA-SCSI"); } else if (hwreg_present(tt_palette)) { ATARIHW_SET(TT_SHIFTER); pr_cont(" TT_SHIFTER"); } else if (hwreg_present(&shifter_st.bas_hi)) { if (hwreg_present(&shifter_st.bas_lo) && (shifter_st.bas_lo = 0x0aau, shifter_st.bas_lo == 0x0aau)) { ATARIHW_SET(EXTD_SHIFTER); pr_cont(" EXTD_SHIFTER"); } else { ATARIHW_SET(STND_SHIFTER); pr_cont(" STND_SHIFTER"); } } if (hwreg_present(&st_mfp.par_dt_reg)) { ATARIHW_SET(ST_MFP); pr_cont(" ST_MFP"); } if (hwreg_present(&tt_mfp.par_dt_reg)) { ATARIHW_SET(TT_MFP); pr_cont(" TT_MFP"); } if (hwreg_present(&tt_scsi_dma.dma_addr_hi)) { ATARIHW_SET(SCSI_DMA); pr_cont(" TT_SCSI_DMA"); } /* * The ST-DMA address registers aren't readable * on all Medusas, so the test below may fail */ if (MACH_IS_MEDUSA || (hwreg_present(&st_dma.dma_vhi) && (st_dma.dma_vhi = 0x55) && (st_dma.dma_hi = 0xaa) && st_dma.dma_vhi == 0x55 && st_dma.dma_hi == 0xaa && (st_dma.dma_vhi = 0xaa) && (st_dma.dma_hi = 0x55) && st_dma.dma_vhi == 0xaa && st_dma.dma_hi == 0x55)) { ATARIHW_SET(EXTD_DMA); pr_cont(" EXTD_DMA"); } if (hwreg_present(&tt_scsi.scsi_data)) { ATARIHW_SET(TT_SCSI); pr_cont(" TT_SCSI"); } if (hwreg_present(&sound_ym.rd_data_reg_sel)) { ATARIHW_SET(YM_2149); pr_cont(" YM2149"); } if (!MACH_IS_MEDUSA && hwreg_present(&tt_dmasnd.ctrl)) { ATARIHW_SET(PCM_8BIT); pr_cont(" PCM"); } if (hwreg_present(&falcon_codec.unused5)) { ATARIHW_SET(CODEC); pr_cont(" CODEC"); } if (hwreg_present(&dsp56k_host_interface.icr)) { ATARIHW_SET(DSP56K); pr_cont(" DSP56K"); } if (hwreg_present(&tt_scc_dma.dma_ctrl) && #if 0 /* This test sucks! Who knows some better? 
*/ (tt_scc_dma.dma_ctrl = 0x01, (tt_scc_dma.dma_ctrl & 1) == 1) && (tt_scc_dma.dma_ctrl = 0x00, (tt_scc_dma.dma_ctrl & 1) == 0) #else !MACH_IS_MEDUSA #endif ) { ATARIHW_SET(SCC_DMA); pr_cont(" SCC_DMA"); } if (scc_test(&atari_scc.cha_a_ctrl)) { ATARIHW_SET(SCC); pr_cont(" SCC"); } if (scc_test(&st_escc.cha_b_ctrl)) { ATARIHW_SET(ST_ESCC); pr_cont(" ST_ESCC"); } if (hwreg_present(&tt_scu.sys_mask)) { ATARIHW_SET(SCU); /* Assume a VME bus if there's a SCU */ ATARIHW_SET(VME); pr_cont(" VME SCU"); } if (hwreg_present((void *)(0xffff9210))) { ATARIHW_SET(ANALOG_JOY); pr_cont(" ANALOG_JOY"); } if (hwreg_present(blitter.halftone)) { ATARIHW_SET(BLITTER); pr_cont(" BLITTER"); } if (hwreg_present((void *)0xfff00039)) { ATARIHW_SET(IDE); pr_cont(" IDE"); } #if 1 /* This maybe wrong */ if (!MACH_IS_MEDUSA && hwreg_present(&tt_microwire.data) && hwreg_present(&tt_microwire.mask) && (tt_microwire.mask = 0x7ff, udelay(1), tt_microwire.data = MW_LM1992_PSG_HIGH | MW_LM1992_ADDR, udelay(1), tt_microwire.data != 0)) { ATARIHW_SET(MICROWIRE); while (tt_microwire.mask != 0x7ff) ; pr_cont(" MICROWIRE"); } #endif if (hwreg_present(&tt_rtc.regsel)) { ATARIHW_SET(TT_CLK); pr_cont(" TT_CLK"); mach_hwclk = atari_tt_hwclk; } if (hwreg_present(&mste_rtc.sec_ones)) { ATARIHW_SET(MSTE_CLK); pr_cont(" MSTE_CLK"); mach_hwclk = atari_mste_hwclk; } if (!MACH_IS_MEDUSA && hwreg_present(&dma_wd.fdc_speed) && hwreg_write(&dma_wd.fdc_speed, 0)) { ATARIHW_SET(FDCSPEED); pr_cont(" FDC_SPEED"); } if (!ATARIHW_PRESENT(ST_SCSI)) { ATARIHW_SET(ACSI); pr_cont(" ACSI"); } pr_cont("\n"); if (CPU_IS_040_OR_060) /* Now it seems to be safe to turn of the tt0 transparent * translation (the one that must not be turned off in * head.S...) 
*/ asm volatile ("\n" " moveq #0,%%d0\n" " .chip 68040\n" " movec %%d0,%%itt0\n" " movec %%d0,%%dtt0\n" " .chip 68k" : /* no outputs */ : /* no inputs */ : "d0"); /* allocator for memory that must reside in st-ram */ atari_stram_init(); /* Set up a mapping for the VMEbus address region: * * VME is either at phys. 0xfexxxxxx (TT) or 0xa00000..0xdfffff * (MegaSTE) In both cases, the whole 16 MB chunk is mapped at * 0xfe000000 virt., because this can be done with a single * transparent translation. On the 68040, lots of often unused * page tables would be needed otherwise. On a MegaSTE or similar, * the highest byte is stripped off by hardware due to the 24 bit * design of the bus. */ if (CPU_IS_020_OR_030) { unsigned long tt1_val; tt1_val = 0xfe008543; /* Translate 0xfexxxxxx, enable, cache * inhibit, read and write, FDC mask = 3, * FDC val = 4 -> Supervisor only */ asm volatile ("\n" " .chip 68030\n" " pmove %0,%/tt1\n" " .chip 68k" : : "m" (tt1_val)); } else { asm volatile ("\n" " .chip 68040\n" " movec %0,%%itt1\n" " movec %0,%%dtt1\n" " .chip 68k" : : "d" (0xfe00a040)); /* Translate 0xfexxxxxx, enable, * supervisor only, non-cacheable/ * serialized, writable */ } /* Fetch tos version at Physical 2 */ /* * We my not be able to access this address if the kernel is * loaded to st ram, since the first page is unmapped. On the * Medusa this is always the case and there is nothing we can do * about this, so we just assume the smaller offset. For the TT * we use the fact that in head.S we have set up a mapping * 0xFFxxxxxx -> 0x00xxxxxx, so that the first 16MB is accessible * in the last 16MB of the address space. */ tos_version = (MACH_IS_MEDUSA) ? 0xfff : *(unsigned short *)0xff000002; atari_rtc_year_offset = (tos_version < 0x306) ? 
70 : 68; } #ifdef CONFIG_HEARTBEAT static void atari_heartbeat(int on) { unsigned char tmp; unsigned long flags; if (atari_dont_touch_floppy_select) return; local_irq_save(flags); sound_ym.rd_data_reg_sel = 14; /* Select PSG Port A */ tmp = sound_ym.rd_data_reg_sel; sound_ym.wd_data = on ? (tmp & ~0x02) : (tmp | 0x02); local_irq_restore(flags); } #endif /* ++roman: * * This function does a reset on machines that lack the ability to * assert the processor's _RESET signal somehow via hardware. It is * based on the fact that you can find the initial SP and PC values * after a reset at physical addresses 0 and 4. This works pretty well * for Atari machines, since the lowest 8 bytes of physical memory are * really ROM (mapped by hardware). For other 680x0 machines: don't * know if it works... * * To get the values at addresses 0 and 4, the MMU better is turned * off first. After that, we have to jump into physical address space * (the PC before the pmove statement points to the virtual address of * the code). Getting that physical address is not hard, but the code * becomes a bit complex since I've tried to ensure that the jump * statement after the pmove is in the cache already (otherwise the * processor can't fetch it!). For that, the code first jumps to the * jump statement with the (virtual) address of the pmove section in * an address register . The jump statement is surely in the cache * now. After that, that physical address of the reset code is loaded * into the same address register, pmove is done and the same jump * statements goes to the reset code. Since there are not many * statements between the two jumps, I hope it stays in the cache. * * The C code makes heavy use of the GCC features that you can get the * address of a C label. No hope to compile this with another compiler * than GCC! */ /* ++andreas: no need for complicated code, just depend on prefetch */ static void atari_reset(void) { long tc_val = 0; long reset_addr; /* * On the Medusa, phys. 
0x4 may contain garbage because it's no * ROM. See above for explanation why we cannot use PTOV(4). */ reset_addr = MACH_IS_MEDUSA || MACH_IS_AB40 ? 0xe00030 : *(unsigned long *) 0xff000004; /* reset ACIA for switch off OverScan, if it's active */ if (atari_switches & ATARI_SWITCH_OVSC_IKBD) acia.key_ctrl = ACIA_RESET; if (atari_switches & ATARI_SWITCH_OVSC_MIDI) acia.mid_ctrl = ACIA_RESET; /* processor independent: turn off interrupts and reset the VBR; * the caches must be left enabled, else prefetching the final jump * instruction doesn't work. */ local_irq_disable(); asm volatile ("movec %0,%%vbr" : : "d" (0)); if (CPU_IS_040_OR_060) { unsigned long jmp_addr040 = virt_to_phys(&&jmp_addr_label040); if (CPU_IS_060) { /* 68060: clear PCR to turn off superscalar operation */ asm volatile ("\n" " .chip 68060\n" " movec %0,%%pcr\n" " .chip 68k" : : "d" (0)); } asm volatile ("\n" " move.l %0,%%d0\n" " and.l #0xff000000,%%d0\n" " or.w #0xe020,%%d0\n" /* map 16 MB, enable, cacheable */ " .chip 68040\n" " movec %%d0,%%itt0\n" " movec %%d0,%%dtt0\n" " .chip 68k\n" " jmp %0@" : : "a" (jmp_addr040) : "d0"); jmp_addr_label040: asm volatile ("\n" " moveq #0,%%d0\n" " nop\n" " .chip 68040\n" " cinva %%bc\n" " nop\n" " pflusha\n" " nop\n" " movec %%d0,%%tc\n" " nop\n" /* the following setup of transparent translations is needed on the * Afterburner040 to successfully reboot. Other machines shouldn't * care about a different tt regs setup, they also didn't care in * the past that the regs weren't turned off. */ " move.l #0xffc000,%%d0\n" /* whole insn space cacheable */ " movec %%d0,%%itt0\n" " movec %%d0,%%itt1\n" " or.w #0x40,%/d0\n" /* whole data space non-cacheable/ser. 
*/ " movec %%d0,%%dtt0\n" " movec %%d0,%%dtt1\n" " .chip 68k\n" " jmp %0@" : /* no outputs */ : "a" (reset_addr) : "d0"); } else asm volatile ("\n" " pmove %0,%%tc\n" " jmp %1@" : /* no outputs */ : "m" (tc_val), "a" (reset_addr)); } static void atari_get_model(char *model) { strcpy(model, "Atari "); switch (atari_mch_cookie >> 16) { case ATARI_MCH_ST: if (ATARIHW_PRESENT(MSTE_CLK)) strcat(model, "Mega ST"); else strcat(model, "ST"); break; case ATARI_MCH_STE: if (MACH_IS_MSTE) strcat(model, "Mega STE"); else strcat(model, "STE"); break; case ATARI_MCH_TT: if (MACH_IS_MEDUSA) /* Medusa has TT _MCH cookie */ strcat(model, "Medusa"); else strcat(model, "TT"); break; case ATARI_MCH_FALCON: strcat(model, "Falcon"); if (MACH_IS_AB40) strcat(model, " (with Afterburner040)"); break; default: sprintf(model + strlen(model), "(unknown mach cookie 0x%lx)", atari_mch_cookie); break; } } static void atari_get_hardware_list(struct seq_file *m) { int i; for (i = 0; i < m68k_num_memory; i++) seq_printf(m, "\t%3ld MB at 0x%08lx (%s)\n", m68k_memory[i].size >> 20, m68k_memory[i].addr, (m68k_memory[i].addr & 0xff000000 ? 
"alternate RAM" : "ST-RAM")); #define ATARIHW_ANNOUNCE(name, str) \ if (ATARIHW_PRESENT(name)) \ seq_printf(m, "\t%s\n", str) seq_puts(m, "Detected hardware:\n"); ATARIHW_ANNOUNCE(STND_SHIFTER, "ST Shifter"); ATARIHW_ANNOUNCE(EXTD_SHIFTER, "STe Shifter"); ATARIHW_ANNOUNCE(TT_SHIFTER, "TT Shifter"); ATARIHW_ANNOUNCE(VIDEL_SHIFTER, "Falcon Shifter"); ATARIHW_ANNOUNCE(YM_2149, "Programmable Sound Generator"); ATARIHW_ANNOUNCE(PCM_8BIT, "PCM 8 Bit Sound"); ATARIHW_ANNOUNCE(CODEC, "CODEC Sound"); ATARIHW_ANNOUNCE(TT_SCSI, "SCSI Controller NCR5380 (TT style)"); ATARIHW_ANNOUNCE(ST_SCSI, "SCSI Controller NCR5380 (Falcon style)"); ATARIHW_ANNOUNCE(ACSI, "ACSI Interface"); ATARIHW_ANNOUNCE(IDE, "IDE Interface"); ATARIHW_ANNOUNCE(FDCSPEED, "8/16 Mhz Switch for FDC"); ATARIHW_ANNOUNCE(ST_MFP, "Multi Function Peripheral MFP 68901"); ATARIHW_ANNOUNCE(TT_MFP, "Second Multi Function Peripheral MFP 68901"); ATARIHW_ANNOUNCE(SCC, "Serial Communications Controller SCC 8530"); ATARIHW_ANNOUNCE(ST_ESCC, "Extended Serial Communications Controller SCC 85230"); ATARIHW_ANNOUNCE(ANALOG_JOY, "Paddle Interface"); ATARIHW_ANNOUNCE(MICROWIRE, "MICROWIRE(tm) Interface"); ATARIHW_ANNOUNCE(STND_DMA, "DMA Controller (24 bit)"); ATARIHW_ANNOUNCE(EXTD_DMA, "DMA Controller (32 bit)"); ATARIHW_ANNOUNCE(SCSI_DMA, "DMA Controller for NCR5380"); ATARIHW_ANNOUNCE(SCC_DMA, "DMA Controller for SCC"); ATARIHW_ANNOUNCE(TT_CLK, "Clock Chip MC146818A"); ATARIHW_ANNOUNCE(MSTE_CLK, "Clock Chip RP5C15"); ATARIHW_ANNOUNCE(SCU, "System Control Unit"); ATARIHW_ANNOUNCE(BLITTER, "Blitter"); ATARIHW_ANNOUNCE(VME, "VME Bus"); ATARIHW_ANNOUNCE(DSP56K, "DSP56001 processor"); } /* * MSch: initial platform device support for Atari, * required for EtherNAT/EtherNEC/NetUSBee drivers */ #if defined(CONFIG_ATARI_ETHERNAT) || defined(CONFIG_ATARI_ETHERNEC) static void isp1160_delay(struct device *dev, int delay) { ndelay(delay); } #endif #ifdef CONFIG_ATARI_ETHERNAT /* * EtherNAT: SMC91C111 Ethernet chipset, handled by smc91x 
driver */

#define ATARI_ETHERNAT_IRQ		140

static struct resource smc91x_resources[] = {
	[0] = {
		.name	= "smc91x-regs",
		.start	= ATARI_ETHERNAT_PHYS_ADDR,
		.end	= ATARI_ETHERNAT_PHYS_ADDR + 0xfffff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.name	= "smc91x-irq",
		.start	= ATARI_ETHERNAT_IRQ,
		.end	= ATARI_ETHERNAT_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device smc91x_device = {
	.name		= "smc91x",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(smc91x_resources),
	.resource	= smc91x_resources,
};

/*
 * ISP 1160 - using the isp116x-hcd module
 */
#define ATARI_USB_PHYS_ADDR	0x80000012
#define ATARI_USB_IRQ		139

static struct resource isp1160_resources[] = {
	[0] = {
		.name	= "isp1160-data",
		.start	= ATARI_USB_PHYS_ADDR,
		.end	= ATARI_USB_PHYS_ADDR + 0x1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.name	= "isp1160-regs",
		.start	= ATARI_USB_PHYS_ADDR + 0x4,
		.end	= ATARI_USB_PHYS_ADDR + 0x5,
		.flags	= IORESOURCE_MEM,
	},
	[2] = {
		.name	= "isp1160-irq",
		.start	= ATARI_USB_IRQ,
		.end	= ATARI_USB_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

/* (DataBusWidth16|AnalogOCEnable|DREQOutputPolarity|DownstreamPort15KRSel ) */
static struct isp116x_platform_data isp1160_platform_data = {
	/* Enable internal resistors on downstream ports */
	.sel15Kres		= 1,
	/* On-chip overcurrent protection */
	.oc_enable		= 1,
	/* INT output polarity */
	.int_act_high		= 1,
	/* INT edge or level triggered */
	.int_edge_triggered	= 0,
	/* WAKEUP pin connected - NOT SUPPORTED */
	/* .remote_wakeup_connected = 0, */
	/* Wakeup by devices on usb bus enabled */
	.remote_wakeup_enable	= 0,
	.delay			= isp1160_delay,
};

static struct platform_device isp1160_device = {
	.name		= "isp116x-hcd",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(isp1160_resources),
	.resource	= isp1160_resources,
	.dev = {
		.platform_data	= &isp1160_platform_data,
	},
};

/* Both EtherNAT functions (Ethernet + USB host) are probed together */
static struct platform_device *atari_ethernat_devices[] __initdata = {
	&smc91x_device,
	&isp1160_device
};
#endif /* CONFIG_ATARI_ETHERNAT */

#ifdef CONFIG_ATARI_ETHERNEC
/*
 * EtherNEC: RTL8019 (NE2000 compatible) Ethernet chipset,
 * handled by ne.c driver
 */
#define ATARI_ETHERNEC_PHYS_ADDR	0xfffa0000
#define ATARI_ETHERNEC_BASE		0x300
#define ATARI_ETHERNEC_IRQ		IRQ_MFP_TIMER1

static struct resource rtl8019_resources[] = {
	[0] = {
		.name	= "rtl8019-regs",
		.start	= ATARI_ETHERNEC_BASE,
		.end	= ATARI_ETHERNEC_BASE + 0x20 - 1,
		.flags	= IORESOURCE_IO,
	},
	[1] = {
		.name	= "rtl8019-irq",
		.start	= ATARI_ETHERNEC_IRQ,
		.end	= ATARI_ETHERNEC_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device rtl8019_device = {
	.name		= "ne",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rtl8019_resources),
	.resource	= rtl8019_resources,
};

/*
 * NetUSBee: ISP1160 USB host adapter via ROM-port adapter
 */
#define ATARI_NETUSBEE_PHYS_ADDR	0xfffa8000
#define ATARI_NETUSBEE_BASE		0x340
#define ATARI_NETUSBEE_IRQ		IRQ_MFP_TIMER2

static struct resource netusbee_resources[] = {
	[0] = {
		.name	= "isp1160-data",
		.start	= ATARI_NETUSBEE_BASE,
		.end	= ATARI_NETUSBEE_BASE + 0x1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.name	= "isp1160-regs",
		.start	= ATARI_NETUSBEE_BASE + 0x20,
		.end	= ATARI_NETUSBEE_BASE + 0x21,
		.flags	= IORESOURCE_MEM,
	},
	[2] = {
		.name	= "isp1160-irq",
		.start	= ATARI_NETUSBEE_IRQ,
		.end	= ATARI_NETUSBEE_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

/* (DataBusWidth16|AnalogOCEnable|DREQOutputPolarity|DownstreamPort15KRSel ) */
static struct isp116x_platform_data netusbee_platform_data = {
	/* Enable internal resistors on downstream ports */
	.sel15Kres		= 1,
	/* On-chip overcurrent protection */
	.oc_enable		= 1,
	/* INT output polarity */
	.int_act_high		= 1,
	/* INT edge or level triggered */
	.int_edge_triggered	= 0,
	/* WAKEUP pin connected - NOT SUPPORTED */
	/* .remote_wakeup_connected = 0, */
	/* Wakeup by devices on usb bus enabled */
	.remote_wakeup_enable	= 0,
	.delay			= isp1160_delay,
};

static struct platform_device netusbee_device = {
	.name		= "isp116x-hcd",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(netusbee_resources),
	.resource	= netusbee_resources,
	.dev = {
		.platform_data	= &netusbee_platform_data,
	},
};

/* Both NetUSBee functions (Ethernet + USB host) are probed together */
static struct platform_device *atari_netusbee_devices[] __initdata = {
	&rtl8019_device,
	&netusbee_device
};
#endif /* CONFIG_ATARI_ETHERNEC */

#if IS_ENABLED(CONFIG_ATARI_SCSI)
/* Falcon-style NCR5380 (interrupt via ST-MFP) */
static const struct resource atari_scsi_st_rsrc[] __initconst = {
	{
		.flags = IORESOURCE_IRQ,
		.start = IRQ_MFP_FSCSI,
		.end   = IRQ_MFP_FSCSI,
	},
};

/* TT-style NCR5380 (interrupt via TT-MFP) */
static const struct resource atari_scsi_tt_rsrc[] __initconst = {
	{
		.flags = IORESOURCE_IRQ,
		.start = IRQ_TT_MFP_SCSI,
		.end   = IRQ_TT_MFP_SCSI,
	},
};
#endif

/*
 * Falcon IDE interface
 */
#define FALCON_IDE_BASE	0xfff00000

static const struct resource atari_falconide_rsrc[] __initconst = {
	DEFINE_RES_MEM(FALCON_IDE_BASE, 0x38),
	DEFINE_RES_MEM(FALCON_IDE_BASE + 0x38, 2),
};

/*
 * Register platform devices for the expansion hardware that was
 * detected (EtherNAT, EtherNEC/NetUSBee, SCSI, Falcon IDE).
 *
 * Returns 0 on success, -ENODEV when not running on an Atari, or the
 * first error from platform device registration otherwise.  Note that
 * a later successful registration does not clear an earlier error.
 */
int __init atari_platform_init(void)
{
	struct platform_device *pdev;
	int rv = 0;

	if (!MACH_IS_ATARI)
		return -ENODEV;

#ifdef CONFIG_ATARI_ETHERNAT
	{
		unsigned char *enatc_virt;

		/* Probe one EtherNAT register to see if the card responds */
		enatc_virt = (unsigned char *)ioremap((ATARI_ETHERNAT_PHYS_ADDR+0x23), 0xf);
		if (hwreg_present(enatc_virt)) {
			rv = platform_add_devices(atari_ethernat_devices,
						ARRAY_SIZE(atari_ethernat_devices));
		}
		iounmap(enatc_virt);
	}
#endif

#ifdef CONFIG_ATARI_ETHERNEC
	{
		int error;
		unsigned char *enec_virt;

		/* Probe the EtherNEC ROM-port window before registering */
		enec_virt = (unsigned char *)ioremap((ATARI_ETHERNEC_PHYS_ADDR), 0xf);
		if (hwreg_present(enec_virt)) {
			error = platform_add_devices(atari_netusbee_devices,
						ARRAY_SIZE(atari_netusbee_devices));
			if (error && !rv)
				rv = error;
		}
		iounmap(enec_virt);
	}
#endif

#if IS_ENABLED(CONFIG_ATARI_SCSI)
	/* ST_SCSI and TT_SCSI flags were set during hardware detection */
	if (ATARIHW_PRESENT(ST_SCSI))
		platform_device_register_simple("atari_scsi", -1,
			atari_scsi_st_rsrc, ARRAY_SIZE(atari_scsi_st_rsrc));
	else if (ATARIHW_PRESENT(TT_SCSI))
		platform_device_register_simple("atari_scsi", -1,
			atari_scsi_tt_rsrc, ARRAY_SIZE(atari_scsi_tt_rsrc));
#endif

	if (ATARIHW_PRESENT(IDE)) {
		pdev = platform_device_register_simple("atari-falcon-ide", -1,
			atari_falconide_rsrc, ARRAY_SIZE(atari_falconide_rsrc));
		if (IS_ERR(pdev))
			rv = PTR_ERR(pdev);
	}

	return rv;
}

arch_initcall(atari_platform_init);
linux-master
arch/m68k/atari/config.c
/*
 * Atari Keyboard driver for 680x0 Linux
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

/*
 * Atari support by Robert de Vries
 * enhanced by Bjoern Brauel and Roman Hodek
 *
 * 2.6 and input cleanup (removed autorepeat stuff) for 2.6.21
 * 06/07 Michael Schmitz
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/keyboard.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kd.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kbd_kern.h>

#include <asm/atariints.h>
#include <asm/atarihw.h>
#include <asm/atarikb.h>
#include <asm/atari_joystick.h>
#include <asm/irq.h>


/* Hook for MIDI serial driver */
void (*atari_MIDI_interrupt_hook) (void);
/* Hook for keyboard inputdev driver */
void (*atari_input_keyboard_interrupt_hook) (unsigned char, char);
/* Hook for mouse inputdev driver */
void (*atari_input_mouse_interrupt_hook) (char *);
EXPORT_SYMBOL(atari_input_keyboard_interrupt_hook);
EXPORT_SYMBOL(atari_input_mouse_interrupt_hook);

/* variables for IKBD self test: */

/* state: 0: off; >0: in progress; >1: 0xf1 received */
static volatile int ikbd_self_test;
/* timestamp when last received a char */
static volatile unsigned long self_test_last_rcv;
/* bitmap of keys reported as broken; one bit per possible scancode */
static unsigned long broken_keys[128/(sizeof(unsigned long)*8)] = { 0, };

/* high bit of a scancode marks a key-release ("break") code */
#define BREAK_MASK	(0x80)

/*
 * ++roman: The following changes were applied manually:
 *
 *  - The Alt (= Meta) key works in combination with Shift and
 *    Control, e.g. Alt+Shift+a sends Meta-A (0xc1), Alt+Control+A sends
 *    Meta-Ctrl-A (0x81) ...
 *
 *  - The parentheses on the keypad send '(' and ')' with all
 *    modifiers (as would do e.g. keypad '+'), but they cannot be used as
 *    application keys (i.e. sending Esc O c).
 *
 *  - HELP and UNDO are mapped to be F21 and F24, resp, that send the
 *    codes "\E[M" and "\E[P". (This is better than the old mapping to
 *    F11 and F12, because these codes are on Shift+F1/2 anyway.) This
 *    way, applications that allow their own keyboard mappings
 *    (e.g. tcsh, X Windows) can be configured to use them in the way
 *    the label suggests (providing help or undoing).
 *
 *  - Console switching is done with Alt+Fx (consoles 1..10) and
 *    Shift+Alt+Fx (consoles 11..20).
 *
 *  - The misc. special function implemented in the kernel are mapped
 *    to the following key combinations:
 *
 *      ClrHome          -> Home/Find
 *      Shift + ClrHome  -> End/Select
 *      Shift + Up       -> Page Up
 *      Shift + Down     -> Page Down
 *      Alt + Help       -> show system status
 *      Shift + Help     -> show memory info
 *      Ctrl + Help      -> show registers
 *      Ctrl + Alt + Del -> Reboot
 *      Alt + Undo       -> switch to last console
 *      Shift + Undo     -> send interrupt
 *      Alt + Insert     -> stop/start output (same as ^S/^Q)
 *      Alt + Up         -> Scroll back console (if implemented)
 *      Alt + Down       -> Scroll forward console (if implemented)
 *      Alt + CapsLock   -> NumLock
 *
 * ++Andreas:
 *
 *  - Help mapped to K_HELP
 *  - Undo mapped to K_UNDO (= K_F246)
 *  - Keypad Left/Right Parenthesis mapped to new K_PPAREN[LR]
 */

/* decoder states for the multi-byte packets the IKBD sends */
typedef enum kb_state_t {
	KEYBOARD,	/* expecting a plain key scancode / packet header */
	AMOUSE,		/* inside an absolute mouse position packet */
	RMOUSE,		/* inside a relative mouse movement packet */
	JOYSTICK,	/* inside a joystick packet */
	CLOCK,		/* inside a clock (time-of-day) packet */
	RESYNC		/* recovering from an ACIA receive overrun */
} KB_STATE_T;

/* bytes that can legally start a packet or stand alone as a scancode */
#define	IS_SYNC_CODE(sc)	((sc) >= 0x04 && (sc) <= 0xfb)

typedef struct keyboard_state {
	unsigned char buf[6];	/* bytes of the packet collected so far */
	int len;		/* number of bytes collected (or left to skip in RESYNC) */
	KB_STATE_T state;
} KEYBOARD_STATE;

KEYBOARD_STATE kb_state;

/* ++roman: If a keyboard overrun happened, we can't tell in general how much
 * bytes have been lost and in which state of the packet structure we are now.
 * This usually causes keyboards bytes to be interpreted as mouse movements
 * and vice versa, which is very annoying. It seems better to throw away some
 * bytes (that are usually mouse bytes) than to misinterpret them. Therefore I
 * introduced the RESYNC state for IKBD data.
In this state, the bytes up to
 * one that really looks like a key event (0x04..0xf2) or the start of a mouse
 * packet (0xf8..0xfb) are thrown away, but at most 2 bytes. This at least
 * speeds up the resynchronization of the event structure, even if maybe a
 * mouse movement is lost. However, nothing is perfect. For bytes 0x01..0x03,
 * it's really hard to decide whether they're mouse or keyboard bytes. Since
 * overruns usually occur when moving the Atari mouse rapidly, they're seen as
 * mouse bytes here. If this is wrong, only a make code of the keyboard gets
 * lost, which isn't too bad. Losing a break code would be disastrous,
 * because then the keyboard repeat strikes...
 */

/*
 * Shared interrupt handler for the keyboard and MIDI ACIAs.
 *
 * Drains the keyboard ACIA one byte at a time and feeds each byte
 * through the kb_state packet decoder; completed packets are passed on
 * to the registered input hooks.  MIDI bytes are delegated to
 * atari_MIDI_interrupt_hook.  Loops ("goto repeat") until neither ACIA
 * has an interrupt pending.
 */
static irqreturn_t atari_keyboard_interrupt(int irq, void *dummy)
{
	u_char acia_stat;
	int scancode;
	int break_flag;

repeat:
	if (acia.mid_ctrl & ACIA_IRQ)
		if (atari_MIDI_interrupt_hook)
			atari_MIDI_interrupt_hook();
	acia_stat = acia.key_ctrl;
	/* check out if the interrupt came from this ACIA */
	if (!((acia_stat | acia.mid_ctrl) & ACIA_IRQ))
		return IRQ_HANDLED;

	if (acia_stat & ACIA_OVRN) {
		/* a very fast typist or a slow system, give a warning */
		/* ...happens often if interrupts were disabled for too long */
		pr_debug("Keyboard overrun\n");
		scancode = acia.key_data;
		if (ikbd_self_test)
			/* During self test, don't do resyncing, just process the code */
			goto interpret_scancode;
		else if (IS_SYNC_CODE(scancode)) {
			/* This code seem already to be the start of a new packet or a
			 * single scancode */
			kb_state.state = KEYBOARD;
			goto interpret_scancode;
		} else {
			/* Go to RESYNC state and skip this byte */
			kb_state.state = RESYNC;
			kb_state.len = 1;	/* skip max. 1 another byte */
			goto repeat;
		}
	}

	if (acia_stat & ACIA_RDRF) {
		/* received a character */
		scancode = acia.key_data;	/* get it or reset the ACIA, I'll get it! */
	interpret_scancode:
		switch (kb_state.state) {
		case KEYBOARD:
			/* dispatch on packet-header bytes; anything else is a scancode */
			switch (scancode) {
			case 0xF7:
				kb_state.state = AMOUSE;
				kb_state.len = 0;
				break;

			case 0xF8:
			case 0xF9:
			case 0xFA:
			case 0xFB:
				kb_state.state = RMOUSE;
				kb_state.len = 1;
				kb_state.buf[0] = scancode;
				break;

			case 0xFC:
				kb_state.state = CLOCK;
				kb_state.len = 0;
				break;

			case 0xFE:
			case 0xFF:
				kb_state.state = JOYSTICK;
				kb_state.len = 1;
				kb_state.buf[0] = scancode;
				break;

			case 0xF1:
				/* during self-test, note that 0xf1 received */
				if (ikbd_self_test) {
					++ikbd_self_test;
					self_test_last_rcv = jiffies;
					break;
				}
				fallthrough;

			default:
				break_flag = scancode & BREAK_MASK;
				scancode &= ~BREAK_MASK;
				if (ikbd_self_test) {
					/* Scancodes sent during the self-test stand for broken
					 * keys (keys being down). The code *should* be a break
					 * code, but nevertheless some AT keyboard interfaces send
					 * make codes instead. Therefore, simply ignore
					 * break_flag... */
					int keyval, keytyp;

					set_bit(scancode, broken_keys);
					self_test_last_rcv = jiffies;
					/* new Linux scancodes; approx. */
					keyval = scancode;
					keytyp = KTYP(keyval) - 0xf0;
					keyval = KVAL(keyval);

					pr_warn("Key with scancode %d ", scancode);
					if (keytyp == KT_LATIN || keytyp == KT_LETTER) {
						if (keyval < ' ')
							pr_cont("('^%c') ", keyval + '@');
						else
							pr_cont("('%c') ", keyval);
					}
					pr_cont("is broken -- will be ignored.\n");
					break;
				} else if (test_bit(scancode, broken_keys))
					break;

				if (atari_input_keyboard_interrupt_hook)
					atari_input_keyboard_interrupt_hook((unsigned char)scancode, !break_flag);
				break;
			}
			break;

		case AMOUSE:
			kb_state.buf[kb_state.len++] = scancode;
			if (kb_state.len == 5) {
				kb_state.state = KEYBOARD;
				/* not yet used */
				/* wake up someone waiting for this */
			}
			break;

		case RMOUSE:
			kb_state.buf[kb_state.len++] = scancode;
			if (kb_state.len == 3) {
				kb_state.state = KEYBOARD;
				if (atari_input_mouse_interrupt_hook)
					atari_input_mouse_interrupt_hook(kb_state.buf);
			}
			break;

		case JOYSTICK:
			kb_state.buf[1] = scancode;
			kb_state.state = KEYBOARD;
#ifdef FIXED_ATARI_JOYSTICK
			atari_joystick_interrupt(kb_state.buf);
#endif
			break;

		case CLOCK:
			kb_state.buf[kb_state.len++] = scancode;
			if (kb_state.len == 6) {
				kb_state.state = KEYBOARD;
				/* wake up someone waiting for this.
				   But will this ever be used, as Linux keeps its own time.
				   Perhaps for synchronization purposes? */
				/* wake_up_interruptible(&clock_wait); */
			}
			break;

		case RESYNC:
			/* discard bytes until a plausible sync code (or the skip
			 * budget in kb_state.len is used up) */
			if (kb_state.len <= 0 || IS_SYNC_CODE(scancode)) {
				kb_state.state = KEYBOARD;
				goto interpret_scancode;
			}
			kb_state.len--;
			break;
		}
	}

#if 0
	if (acia_stat & ACIA_CTS)
		/* cannot happen */;
#endif

	if (acia_stat & (ACIA_FE | ACIA_PE)) {
		pr_err("Error in keyboard communication\n");
	}

	/* handle_scancode() can take a lot of time, so check again if
	 * some character arrived */
	goto repeat;
}

/*
 * I write to the keyboard without using interrupts, I poll instead.
 * This takes for the maximum length string allowed (7) at 7812.5 baud
 * 8 data 1 start 1 stop bit: 9.0 ms
 * If this takes too long for normal operation, interrupt driven writing
 * is the solution.
(I made a feeble attempt in that direction but I kept it simple for now.)
 */

/*
 * Send a command string to the IKBD by busy-polling the keyboard ACIA's
 * transmit-data-register-empty flag.  Panics on out-of-range length
 * (valid: 1..7 bytes).
 */
void ikbd_write(const char *str, int len)
{
	u_char acia_stat;

	if ((len < 1) || (len > 7))
		panic("ikbd: maximum string length exceeded");
	while (len) {
		acia_stat = acia.key_ctrl;
		if (acia_stat & ACIA_TDRE) {
			acia.key_data = *str++;
			len--;
		}
	}
}

/* Reset (without touching the clock) */
void ikbd_reset(void)
{
	static const char cmd[2] = { 0x80, 0x01 };

	ikbd_write(cmd, 2);

	/*
	 * if all's well code 0xF1 is returned, else the break codes of
	 * all keys making contact
	 */
}

/* Set mouse button action */
void ikbd_mouse_button_action(int mode)
{
	char cmd[2] = { 0x07, mode };

	ikbd_write(cmd, 2);
}

/* Set relative mouse position reporting */
void ikbd_mouse_rel_pos(void)
{
	static const char cmd[1] = { 0x08 };

	ikbd_write(cmd, 1);
}
EXPORT_SYMBOL(ikbd_mouse_rel_pos);

/* Set absolute mouse position reporting */
void ikbd_mouse_abs_pos(int xmax, int ymax)
{
	char cmd[5] = { 0x09, xmax>>8, xmax&0xFF, ymax>>8, ymax&0xFF };

	ikbd_write(cmd, 5);
}

/* Set mouse keycode mode */
void ikbd_mouse_kbd_mode(int dx, int dy)
{
	char cmd[3] = { 0x0A, dx, dy };

	ikbd_write(cmd, 3);
}

/* Set mouse threshold */
void ikbd_mouse_thresh(int x, int y)
{
	char cmd[3] = { 0x0B, x, y };

	ikbd_write(cmd, 3);
}
EXPORT_SYMBOL(ikbd_mouse_thresh);

/* Set mouse scale */
void ikbd_mouse_scale(int x, int y)
{
	char cmd[3] = { 0x0C, x, y };

	ikbd_write(cmd, 3);
}

/* Interrogate mouse position */
void ikbd_mouse_pos_get(int *x, int *y)
{
	static const char cmd[1] = { 0x0D };

	ikbd_write(cmd, 1);

	/* wait for returning bytes */
}

/* Load mouse position */
void ikbd_mouse_pos_set(int x, int y)
{
	char cmd[6] = { 0x0E, 0x00, x>>8, x&0xFF, y>>8, y&0xFF };

	ikbd_write(cmd, 6);
}

/* Set Y=0 at bottom */
void ikbd_mouse_y0_bot(void)
{
	static const char cmd[1] = { 0x0F };

	ikbd_write(cmd, 1);
}

/* Set Y=0 at top */
void ikbd_mouse_y0_top(void)
{
	static const char cmd[1] = { 0x10 };

	ikbd_write(cmd, 1);
}
EXPORT_SYMBOL(ikbd_mouse_y0_top);

/* Disable mouse */
void ikbd_mouse_disable(void)
{
	static const char cmd[1] = { 0x12 };

	ikbd_write(cmd, 1);
}
EXPORT_SYMBOL(ikbd_mouse_disable);

/* Set joystick event reporting */
void ikbd_joystick_event_on(void)
{
	static const char cmd[1] = { 0x14 };

	ikbd_write(cmd, 1);
}

/* Set joystick interrogation mode */
void ikbd_joystick_event_off(void)
{
	static const char cmd[1] = { 0x15 };

	ikbd_write(cmd, 1);
}

/* Joystick interrogation */
void ikbd_joystick_get_state(void)
{
	static const char cmd[1] = { 0x16 };

	ikbd_write(cmd, 1);
}

#if 0
/* This disables all other ikbd activities !!!! */
/* Set joystick monitoring */
void ikbd_joystick_monitor(int rate)
{
	static const char cmd[2] = { 0x17, rate };

	ikbd_write(cmd, 2);
	kb_state.state = JOYSTICK_MONITOR;
}
#endif

/* some joystick routines not in yet (0x18-0x19) */

/* Disable joysticks */
void ikbd_joystick_disable(void)
{
	static const char cmd[1] = { 0x1A };

	ikbd_write(cmd, 1);
}

/*
 * The original code sometimes left the interrupt line of
 * the ACIAs low forever. I hope, it is fixed now.
 *
 * Martin Rogge, 20 Aug 1995
 */

static int atari_keyb_done = 0;

/*
 * One-time initialisation of keyboard and MIDI ACIAs and the IKBD:
 * installs the shared ACIA interrupt handler, resets both ACIAs until
 * the MFP input line reports the interrupt line high, runs the IKBD
 * self test (keys reported during it are recorded in broken_keys), and
 * finally disables mouse and joystick reporting.  Safe to call more
 * than once; subsequent calls return 0 immediately.
 */
int atari_keyb_init(void)
{
	int error;

	if (atari_keyb_done)
		return 0;

	kb_state.state = KEYBOARD;
	kb_state.len = 0;

	error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, 0,
			    "keyboard,mouse,MIDI", atari_keyboard_interrupt);
	if (error)
		return error;

	atari_turnoff_irq(IRQ_MFP_ACIA);
	do {
		/* reset IKBD ACIA */
		acia.key_ctrl = ACIA_RESET |
				((atari_switches & ATARI_SWITCH_IKBD) ? ACIA_RHTID : 0);
		(void)acia.key_ctrl;
		(void)acia.key_data;

		/* reset MIDI ACIA */
		acia.mid_ctrl = ACIA_RESET |
				((atari_switches & ATARI_SWITCH_MIDI) ? ACIA_RHTID : 0);
		(void)acia.mid_ctrl;
		(void)acia.mid_data;

		/* divide 500kHz by 64 gives 7812.5 baud */
		/* 8 data no parity 1 start 1 stop bit */
		/* receive interrupt enabled */
		/* RTS low (except if switch selected), transmit interrupt disabled */
		acia.key_ctrl = (ACIA_DIV64|ACIA_D8N1S|ACIA_RIE) |
				((atari_switches & ATARI_SWITCH_IKBD) ?
				 ACIA_RHTID : ACIA_RLTID);

		acia.mid_ctrl = ACIA_DIV16 | ACIA_D8N1S |
				((atari_switches & ATARI_SWITCH_MIDI) ? ACIA_RHTID : 0);

	/* make sure the interrupt line is up */
	} while ((st_mfp.par_dt_reg & 0x10) == 0);

	/* enable ACIA Interrupts */
	st_mfp.active_edge &= ~0x10;
	atari_turnon_irq(IRQ_MFP_ACIA);

	ikbd_self_test = 1;
	ikbd_reset();
	/* wait for a period of inactivity (here: 0.25s), then assume the IKBD's
	 * self-test is finished */
	self_test_last_rcv = jiffies;
	while (time_before(jiffies, self_test_last_rcv + HZ/4))
		barrier();
	/* if not incremented: no 0xf1 received */
	if (ikbd_self_test == 1)
		pr_err("Keyboard self test failed!\n");
	ikbd_self_test = 0;

	ikbd_mouse_disable();
	ikbd_joystick_disable();

#ifdef FIXED_ATARI_JOYSTICK
	atari_joystick_init();
#endif

	// flag init done
	atari_keyb_done = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(atari_keyb_init);
linux-master
arch/m68k/atari/atakeyb.c
/*
 * linux/arch/m68k/sun3/sun3ints.c -- Sun-3(x) Linux interrupt handling code
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <asm/intersil.h>
#include <asm/oplib.h>
#include <asm/sun3ints.h>
#include <asm/irq_regs.h>
#include <linux/seq_file.h>

extern void sun3_leds (unsigned char);

/* Bit 0 of the interrupt register gates all interrupts */
void sun3_disable_interrupts(void)
{
	sun3_disable_irq(0);
}

void sun3_enable_interrupts(void)
{
	sun3_enable_irq(0);
}

/*
 * LED patterns cycled by the timer/NMI handlers below; inverted because
 * the LED register appears to be active-low (written values are ~bit).
 */
static int led_pattern[8] = {
	~(0x80), ~(0x01),
	~(0x40), ~(0x02),
	~(0x20), ~(0x04),
	~(0x10), ~(0x08)
};

/* memory-mapped interrupt enable register; one bit per IRQ level */
volatile unsigned char* sun3_intreg;

void sun3_enable_irq(unsigned int irq)
{
	*sun3_intreg |=  (1 << irq);
}

void sun3_disable_irq(unsigned int irq)
{
	*sun3_intreg &= ~(1 << irq);
}

/* Level-7 (NMI) handler: only cycles the front-panel LEDs */
static irqreturn_t sun3_int7(int irq, void *dev_id)
{
	unsigned int cnt;

	cnt = kstat_irqs_cpu(irq, 0);
	if (!(cnt % 2000))
		sun3_leds(led_pattern[cnt % 16000 / 2000]);
	return IRQ_HANDLED;
}

/*
 * Level-5 clock interrupt: acknowledge the intersil clock chip, pulse
 * the IRQ enable bit to clear the latched interrupt, then run the
 * periodic timer tick and update the LEDs.
 */
static irqreturn_t sun3_int5(int irq, void *dev_id)
{
	unsigned long flags;
	unsigned int cnt;

	local_irq_save(flags);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	sun3_disable_irq(5);
	sun3_enable_irq(5);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	legacy_timer_tick(1);
	cnt = kstat_irqs_cpu(irq, 0);
	if (!(cnt % 20))
		sun3_leds(led_pattern[cnt % 160 / 20]);
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

/* Spurious/unused vector 255: just acknowledge it */
static irqreturn_t sun3_vec255(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

void __init sun3_init_IRQ(void)
{
	/* enable only the master interrupt bit to start with */
	*sun3_intreg = 1;

	m68k_setup_user_interrupt(VEC_USER, 128);

	if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "clock", NULL))
		pr_err("Couldn't register %s interrupt\n", "int5");
	if (request_irq(IRQ_AUTO_7, sun3_int7, 0, "nmi", NULL))
		pr_err("Couldn't register %s interrupt\n", "int7");
	if (request_irq(IRQ_USER+127, sun3_vec255, 0, "vec255", NULL))
		pr_err("Couldn't register %s interrupt\n", "vec255");
}
linux-master
arch/m68k/sun3/sun3ints.c
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 *
 * The DVMA window [DVMA_START, DVMA_END) is managed as a list of free
 * "holes"; allocations carve space from a hole (from its top end) and
 * frees either extend an adjacent hole or create a new one from the
 * static hole cache.
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

#ifdef CONFIG_SUN3X
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
static inline void dvma_unmap_iommu(unsigned long a, int b)
{
}
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif

/* per-DVMA-page allocation length, indexed by page within the window;
 * nonzero means "start of an allocation of that many bytes" */
static unsigned long *iommu_use;

#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr)		(iommu_use[dvma_index(baddr)])

/* a free region of the DVMA window; size is always end - start */
struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

static struct list_head hole_list;	/* free regions */
static struct list_head hole_cache;	/* unused struct hole descriptors */
static struct hole initholes[64];	/* static backing store for the cache */

#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

/* Dump every in-use DVMA entry plus cumulative alloc/free statistics. */
static void print_use(void)
{
	int i;
	int j = 0;

	pr_info("dvma entry usage:\n");

	for(i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
		if(!iommu_use[i])
			continue;

		j++;

		pr_info("dvma entry: %08x len %08lx\n",
			(i << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[i]);
	}

	pr_info("%d entries in use total\n", j);

	pr_info("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
	pr_info("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
		dvma_free_bytes);
}

/* Dump a hole list, skipping all-zero (never-used) descriptors. */
static void print_holes(struct list_head *holes)
{
	struct list_head *cur;
	struct hole *hole;

	pr_info("listing dvma holes\n");
	list_for_each(cur, holes) {
		hole = list_entry(cur, struct hole, list);

		if((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
			continue;

		pr_info("hole: start %08lx end %08lx size %08lx\n",
			hole->start, hole->end, hole->size);
	}

	pr_info("end of hole listing...\n");
}
#endif /* DVMA_DEBUG */

/*
 * Refill the hole cache by merging adjacent holes on hole_list: when a
 * hole's end meets the previous hole's start, the two are coalesced and
 * the now-redundant descriptor is returned to hole_cache.  Returns the
 * number of descriptors recovered (0 if nothing could be merged).
 */
static inline int refill(void)
{
	struct hole *hole;
	struct hole *prev = NULL;
	struct list_head *cur;
	int ret = 0;

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if(!prev) {
			prev = hole;
			continue;
		}

		if(hole->end == prev->start) {
			hole->size += prev->size;
			hole->end = prev->end;
			list_move(&(prev->list), &hole_cache);
			ret++;
		}

	}

	return ret;
}

/*
 * Take a descriptor from the hole cache, attempting a refill() first if
 * the cache is empty.  BUGs if no descriptor can be recovered -- the
 * caller always needs one.
 */
static inline struct hole *rmcache(void)
{
	struct hole *ret;

	if(list_empty(&hole_cache)) {
		if(!refill()) {
			pr_crit("out of dvma hole cache!\n");
			BUG();
		}
	}

	ret = list_entry(hole_cache.next, struct hole, list);
	list_del(&(ret->list));

	return ret;

}

/*
 * Allocate @len bytes of DVMA space with the given @align (both already
 * rounded to DVMA page granularity by the caller).  Space is taken from
 * the TOP of the first hole that fits; an exactly-fitting hole is
 * removed from the list entirely.  Records the allocation length in
 * iommu_use[] so free_baddr() can recover it.  BUGs on exhaustion.
 */
static inline unsigned long get_baddr(int len, unsigned long align)
{

	struct list_head *cur;
	struct hole *hole;

	if(list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		pr_crit("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		/* pad the request so the top-of-hole allocation below
		 * ends up aligned; no padding needed for page alignment */
		if(align > DVMA_PAGE_SIZE)
			newlen = len + ((hole->end - len) & (align-1));
		else
			newlen = len;

		if(hole->size > newlen) {
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if(hole->size == newlen) {
			list_move(&(hole->list), &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}

	}

	pr_crit("unable to find dvma hole!\n");
	BUG();
	return 0;
}

/*
 * Return the allocation starting at @baddr to the free pool.  Looks up
 * the recorded length, tears down the IOMMU mapping, then either grows
 * an adjacent hole or inserts a fresh descriptor.  Note: after a full
 * (match-free) traversal, `cur` is back at the list head, so list_add()
 * effectively inserts the new hole at the front of hole_list.
 * Always returns 0.
 */
static inline int free_baddr(unsigned long baddr)
{

	unsigned long len;
	struct hole *hole;
	struct list_head *cur;
	unsigned long orig_baddr;

	orig_baddr = baddr;
	len = dvma_entry_use(baddr);
	dvma_entry_use(baddr) = 0;
	baddr &= DVMA_PAGE_MASK;
	dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
	dvma_frees++;
	dvma_free_bytes += len;
#endif

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if(hole->end == baddr) {
			hole->end += len;
			hole->size += len;
			return 0;
		} else if(hole->start == (baddr + len)) {
			hole->start = baddr;
			hole->size += len;
			return 0;
		}

	}

	hole = rmcache();

	hole->start = baddr;
	hole->end = baddr + len;
	hole->size = len;

	//	list_add_tail(&(hole->list), cur);
	list_add(&(hole->list), cur);

	return 0;

}

/*
 * One-time setup: seed the hole cache with the static descriptors,
 * create the single initial hole covering the whole DVMA window,
 * allocate the iommu_use[] tracking array, and clear any stale IOMMU
 * state before handing off to the platform-specific init.
 */
void __init dvma_init(void)
{

	struct hole *hole;
	int i;

	INIT_LIST_HEAD(&hole_list);
	INIT_LIST_HEAD(&hole_cache);

	/* prepare the hole cache */
	for(i = 0; i < 64; i++)
		list_add(&(initholes[i].list), &hole_cache);

	hole = rmcache();
	hole->start = DVMA_START;
	hole->end = DVMA_END;
	hole->size = DVMA_SIZE;

	list_add(&(hole->list), &hole_list);

	iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
				   SMP_CACHE_BYTES);
	if (!iommu_use)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));

	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

#ifdef CONFIG_SUN3
	sun3_dvma_init();
#endif

}

/*
 * Map @len bytes at kernel address @kaddr into the DVMA window with the
 * requested alignment and return the bus address a device should use
 * (0 on bad arguments).  A zero @len is treated as a 0x800-byte request.
 * NOTE(review): kaddr is masked with PAGE_MASK while the offset uses
 * DVMA_PAGE_MASK -- presumably these agree on sun3; confirm for sun3x.
 */
unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{

	unsigned long baddr;
	unsigned long off;

	if(!len)
		len = 0x800;

	if(!kaddr || !len) {
//		pr_err("error: kaddr %lx len %x\n", kaddr, len);
//		*(int *)4 = 0;
		return 0;
	}

	pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
	off = kaddr & ~DVMA_PAGE_MASK;
	kaddr &= PAGE_MASK;
	len += off;
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if(align == 0)
		align = DVMA_PAGE_SIZE;
	else
		align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	baddr = get_baddr(len, align);
//	pr_info("using baddr %lx\n", baddr);

	if(!dvma_map_iommu(kaddr, baddr, len))
		return (baddr + off);

	pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr, len);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dvma_map_align);

/*
 * Undo a dvma_map_align().  Addresses outside the 0x00f00000 window are
 * first normalized into it (VME mappings hand back truncated addresses).
 */
void dvma_unmap(void *baddr)
{
	unsigned long addr;

	addr = (unsigned long)baddr;
	/* check if this is a vme mapping */
	if(!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);

	return;

}
EXPORT_SYMBOL(dvma_unmap);

/*
 * Allocate @len bytes of kernel memory and map it for DVMA; returns the
 * CPU-visible virtual address of the mapping, or NULL on any failure
 * (pages are released again on the partial-failure paths).
 */
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
	unsigned long kaddr;
	unsigned long baddr;
	unsigned long vaddr;

	if(!len)
		return NULL;

	pr_debug("dvma_malloc request %lx bytes\n", len);
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
		return NULL;

	if((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	vaddr = dvma_btov(baddr);

	if(dvma_map_cpu(kaddr, vaddr, len) < 0) {
		dvma_unmap((void *)baddr);
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr, baddr);

	return (void *)vaddr;

}
EXPORT_SYMBOL(dvma_malloc_align);

/* Intentionally a no-op: DVMA memory is never returned (the backing
 * pages and mapping persist for the life of the system). */
void dvma_free(void *vaddr)
{

	return;

}
EXPORT_SYMBOL(dvma_free);
linux-master
arch/m68k/sun3/sun3dvma.c
// SPDX-License-Identifier: GPL-2.0
/*
** Tablewalk MMU emulator
**
** by Toshiyasu Morita
**
** Started 1/16/98 @ 2:22 am
*/

#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <asm/setup.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/sun3mmu.h>
#include <asm/oplib.h>
#include <asm/mmu_context.h>
#include <asm/dvma.h>


#undef DEBUG_MMU_EMU
#define DEBUG_PROM_MAPS

/*
** Defines
*/

#define CONTEXTS_NUM		8
#define SEGMAPS_PER_CONTEXT_NUM 2048
#define PAGES_PER_SEGMENT	16
#define PMEGS_NUM		256
#define PMEG_MASK		0xFF

/*
** Globals
*/

unsigned long m68k_vmalloc_end;
EXPORT_SYMBOL(m68k_vmalloc_end);

/* Per-PMEG bookkeeping.  pmeg_alloc[] values as used below:
 *   0 = free, 1 = allocated to a user context, 2 = reserved
 *   (kernel/bootmem/PROM -- never reclaimed). */
unsigned long pmeg_vaddr[PMEGS_NUM];
unsigned char pmeg_alloc[PMEGS_NUM];
unsigned char pmeg_ctx[PMEGS_NUM];

/* pointers to the mm structs for each task in each context. 0xffffffff
   is a marker for kernel context */
static struct mm_struct *ctx_alloc[CONTEXTS_NUM] = {
    [0] = (struct mm_struct *)0xffffffff
};

/* has this context been mmdrop'd? */
static unsigned char ctx_avail = CONTEXTS_NUM-1;

/* array of pages to be marked off for the rom when we do mem_init later */
/* 256 pages lets the rom take up to 2mb of physical ram..  I really
   hope it never wants mote than that. */
unsigned long rom_pages[256];

/* Print a PTE value in symbolic form. For debugging. */
void print_pte (pte_t pte)
{
#if 0
	/* Verbose version. */
	unsigned long val = pte_val (pte);
	pr_cont(" pte=%lx [addr=%lx",
		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT);
	if (val & SUN3_PAGE_VALID)	pr_cont(" valid");
	if (val & SUN3_PAGE_WRITEABLE)	pr_cont(" write");
	if (val & SUN3_PAGE_SYSTEM)	pr_cont(" sys");
	if (val & SUN3_PAGE_NOCACHE)	pr_cont(" nocache");
	if (val & SUN3_PAGE_ACCESSED)	pr_cont(" accessed");
	if (val & SUN3_PAGE_MODIFIED)	pr_cont(" modified");
	switch (val & SUN3_PAGE_TYPE_MASK) {
		case SUN3_PAGE_TYPE_MEMORY: pr_cont(" memory"); break;
		case SUN3_PAGE_TYPE_IO:     pr_cont(" io");     break;
		case SUN3_PAGE_TYPE_VME16:  pr_cont(" vme16");  break;
		case SUN3_PAGE_TYPE_VME32:  pr_cont(" vme32");  break;
	}
	pr_cont("]\n");
#else
	/* Terse version. More likely to fit on a line. */
	unsigned long val = pte_val (pte);
	char flags[7], *type;

	flags[0] = (val & SUN3_PAGE_VALID)     ? 'v' : '-';
	flags[1] = (val & SUN3_PAGE_WRITEABLE) ? 'w' : '-';
	flags[2] = (val & SUN3_PAGE_SYSTEM)    ? 's' : '-';
	flags[3] = (val & SUN3_PAGE_NOCACHE)   ? 'x' : '-';
	flags[4] = (val & SUN3_PAGE_ACCESSED)  ? 'a' : '-';
	flags[5] = (val & SUN3_PAGE_MODIFIED)  ? 'm' : '-';
	flags[6] = '\0';

	switch (val & SUN3_PAGE_TYPE_MASK) {
		case SUN3_PAGE_TYPE_MEMORY: type = "memory"; break;
		case SUN3_PAGE_TYPE_IO:     type = "io"    ; break;
		case SUN3_PAGE_TYPE_VME16:  type = "vme16" ; break;
		case SUN3_PAGE_TYPE_VME32:  type = "vme32" ; break;
		default: type = "unknown?"; break;
	}

	pr_cont(" pte=%08lx [%07lx %s %s]\n",
		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT, flags, type);
#endif
}

/* Print the PTE value for a given virtual address. For debugging. */
void print_pte_vaddr (unsigned long vaddr)
{
	pr_cont(" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr));
	print_pte (__pte (sun3_get_pte (vaddr)));
}

/*
 * Initialise the MMU emulator.
 *
 * Reserves the PMEGs already consumed by bootmem and by the PROM,
 * invalidates every other kernel-space mapping, discovers the end of
 * the vmalloc region from the PROM's high mappings, and finally clones
 * the resulting base segmap into all hardware contexts via the PROM
 * setcxsegmap vector.
 */
void __init mmu_emu_init(unsigned long bootmem_end)
{
	unsigned long seg, num;
	int i,j;

	memset(rom_pages, 0, sizeof(rom_pages));
	memset(pmeg_vaddr, 0, sizeof(pmeg_vaddr));
	memset(pmeg_alloc, 0, sizeof(pmeg_alloc));
	memset(pmeg_ctx, 0, sizeof(pmeg_ctx));

	/* pmeg align the end of bootmem, adding another pmeg,
	 * later bootmem allocations will likely need it */
	bootmem_end = (bootmem_end + (2 * SUN3_PMEG_SIZE)) & ~SUN3_PMEG_MASK;

	/* mark all of the pmegs used thus far as reserved */
	for (i=0; i < __pa(bootmem_end) / SUN3_PMEG_SIZE ; ++i)
		pmeg_alloc[i] = 2;


	/* I'm thinking that most of the top pmeg's are going to be
	   used for something, and we probably shouldn't risk it */
	for(num = 0xf0; num <= 0xff; num++)
		pmeg_alloc[num] = 2;

	/* liberate all existing mappings in the rest of kernel space */
	for(seg = bootmem_end; seg < 0x0f800000; seg += SUN3_PMEG_SIZE) {
		i = sun3_get_segmap(seg);

		if(!pmeg_alloc[i]) {
#ifdef DEBUG_MMU_EMU
			pr_info("freed:");
			print_pte_vaddr (seg);
#endif
			sun3_put_segmap(seg, SUN3_INVALID_PMEG);
		}
	}

	j = 0;
	for (num=0, seg=0x0F800000; seg<0x10000000; seg+=16*PAGE_SIZE) {
		if (sun3_get_segmap (seg) != SUN3_INVALID_PMEG) {
#ifdef DEBUG_PROM_MAPS
			for(i = 0; i < 16; i++) {
				pr_info("mapped:");
				print_pte_vaddr (seg + (i*PAGE_SIZE));
				break;
			}
#endif
			// the lowest mapping here is the end of our
			// vmalloc region
			if (!m68k_vmalloc_end)
				m68k_vmalloc_end = seg;

			// mark the segmap alloc'd, and reserve any
			// of the first 0xbff pages the hardware is
			// already using...  does any sun3 support > 24mb?
			pmeg_alloc[sun3_get_segmap(seg)] = 2;
		}
	}

	dvma_init();


	/* blank everything below the kernel, and we've got the base
	   mapping to start all the contexts off with... */
	for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
		sun3_put_segmap(seg, SUN3_INVALID_PMEG);

	/* copy context 0's segmap into every other context through the
	 * PROM; function code 3 selects the MMU control space */
	set_fc(3);
	for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
		i = sun3_get_segmap(seg);
		for(j = 1; j < CONTEXTS_NUM; j++)
			(*(romvec->pv_setctxt))(j, (void *)seg, i);
	}
	set_fc(USER_DATA);
}

/* erase the mappings for a dead context.  Uses the pg_dir for hints
   as the pmeg tables proved somewhat unreliable, and unmapping all of
   TASK_SIZE was much slower and no more stable. */
/* todo: find a better way to keep track of the pmegs used by a
   context for when they're cleared */
void clear_context(unsigned long context)
{
	unsigned char oldctx;
	unsigned long i;

	/* context 0 is the kernel's permanent context; its ctx_alloc
	 * marker slot is never released */
	if(context) {
		if(!ctx_alloc[context])
			panic("%s: context not allocated\n", __func__);

		ctx_alloc[context]->context = SUN3_INVALID_CONTEXT;
		ctx_alloc[context] = (struct mm_struct *)0;
		ctx_avail++;
	}

	oldctx = sun3_get_context();

	sun3_put_context(context);

	/* invalidate every PMEG this context owned (alloc state 1 only;
	 * reserved PMEGs stay mapped) */
	for(i = 0; i < SUN3_INVALID_PMEG; i++) {
		if((pmeg_ctx[i] == context) && (pmeg_alloc[i] == 1)) {
			sun3_put_segmap(pmeg_vaddr[i], SUN3_INVALID_PMEG);
			pmeg_ctx[i] = 0;
			pmeg_alloc[i] = 0;
			pmeg_vaddr[i] = 0;
		}
	}

	sun3_put_context(oldctx);
}

/* gets an empty context.  if full, kills the next context listed to
   die first */
/* This context invalidation scheme is, well, totally arbitrary, I'm
   sure it could be much more intelligent...  but it gets the job done
   for now without much overhead in making it's decision. */
/* todo: come up with optimized scheme for flushing contexts */
unsigned long get_free_context(struct mm_struct *mm)
{
	unsigned long new = 1;
	static unsigned char next_to_die = 1;

	if(!ctx_avail) {
		/* kill someone to get our context */
		new = next_to_die;
		clear_context(new);
		/* round-robin over contexts 1..7 (0 is the kernel's) */
		next_to_die = (next_to_die + 1) & 0x7;
		if(!next_to_die)
			next_to_die++;
	} else {
		while(new < CONTEXTS_NUM) {
			if(ctx_alloc[new])
				new++;
			else
				break;
		}
		// check to make sure one was really free...
		if(new == CONTEXTS_NUM)
			panic("%s: failed to find free context", __func__);
	}

	ctx_alloc[new] = mm;
	ctx_avail--;

	return new;
}

/*
 * Dynamically select a `spare' PMEG and use it to map virtual `vaddr' in
 * `context'. Maintain internal PMEG management structures. This doesn't
 * actually map the physical address, but does clear the old mappings.
 */
//todo: better allocation scheme? but is extra complexity worthwhile?
//todo: only clear old entries if necessary? how to tell?

inline void mmu_emu_map_pmeg (int context, int vaddr)
{
	/* rotating cursor over the PMEG pool; starts past the low
	 * (kernel/bootmem) PMEGs reserved in mmu_emu_init() */
	static unsigned char curr_pmeg = 128;
	int i;

	/* Round address to PMEG boundary. */
	vaddr &= ~SUN3_PMEG_MASK;

	/* Find a spare one.  State 2 (reserved) is skipped; state 1
	 * entries are stolen after invalidating their old mapping below.
	 * curr_pmeg is an unsigned char, so the search wraps at 256. */
	while (pmeg_alloc[curr_pmeg] == 2)
		++curr_pmeg;


#ifdef DEBUG_MMU_EMU
	pr_info("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n",
		curr_pmeg, context, vaddr);
#endif

	/* Invalidate old mapping for the pmeg, if any */
	if (pmeg_alloc[curr_pmeg] == 1) {
		sun3_put_context(pmeg_ctx[curr_pmeg]);
		sun3_put_segmap (pmeg_vaddr[curr_pmeg], SUN3_INVALID_PMEG);
		sun3_put_context(context);
	}

	/* Update PMEG management structures. */
	// don't take pmeg's away from the kernel...
	if(vaddr >= PAGE_OFFSET) {
		/* map kernel pmegs into all contexts */
		unsigned char i;

		for(i = 0; i < CONTEXTS_NUM; i++) {
			sun3_put_context(i);
			sun3_put_segmap (vaddr, curr_pmeg);
		}
		sun3_put_context(context);
		pmeg_alloc[curr_pmeg] = 2;
		pmeg_ctx[curr_pmeg] = 0;

	} else {
		pmeg_alloc[curr_pmeg] = 1;
		pmeg_ctx[curr_pmeg] = context;
		sun3_put_segmap (vaddr, curr_pmeg);

	}
	pmeg_vaddr[curr_pmeg] = vaddr;

	/* Set hardware mapping and clear the old PTE entries. */
	for (i=0; i<SUN3_PMEG_SIZE; i+=SUN3_PTE_SIZE)
		sun3_put_pte (vaddr + i, SUN3_PAGE_SYSTEM);

	/* Consider a different one next time. */
	++curr_pmeg;
}

/*
 * Handle a pagefault at virtual address `vaddr'; check if there should be a
 * page there (specifically, whether the software pagetables indicate that
 * there is). This is necessary due to the limited size of the second-level
 * Sun3 hardware pagetables (256 groups of 16 pages). If there should be a
 * mapping present, we select a `spare' PMEG and use it to create a mapping.
 * `read_flag' is nonzero for a read fault; zero for a write. Returns nonzero
 * if we successfully handled the fault.
 */
//todo: should we bump minor pagefault counter? if so, here or in caller?
//todo: possibly inline this into bus_error030 in <asm/buserror.h> ?
// kernel_fault is set when a kernel page couldn't be demand mapped,
// and forces another try using the kernel page table.  basically a
// hack so that vmalloc would work correctly.

int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
{
	unsigned long segment, offset;
	unsigned char context;
	pte_t *pte;
	pgd_t * crp;

	/* pick the page-table root: kernel table for kernel threads and
	 * forced kernel faults, otherwise the current mm's */
	if(current->mm == NULL) {
		crp = swapper_pg_dir;
		context = 0;
	} else {
		context = current->mm->context;
		if(kernel_fault)
			crp = swapper_pg_dir;
		else
			crp = current->mm->pgd;
	}

#ifdef DEBUG_MMU_EMU
	pr_info("%s: vaddr=%lx type=%s crp=%p\n", __func__, vaddr,
		read_flag ? "read" : "write", crp);
#endif

	/* split vaddr into segment (pgd index) and page-within-segment */
	segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
	offset  = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;

#ifdef DEBUG_MMU_EMU
	pr_info("%s: segment=%lx offset=%lx\n", __func__, segment, offset);
#endif

	pte = (pte_t *) pgd_val (*(crp + segment));

//todo: next line should check for valid pmd properly.
	if (!pte) {
//                pr_info("mmu_emu_handle_fault: invalid pmd\n");
		return 0;
	}

	pte = (pte_t *) __va ((unsigned long)(pte + offset));

	/* Make sure this is a valid page */
	if (!(pte_val (*pte) & SUN3_PAGE_VALID))
		return 0;

	/* Make sure there's a pmeg allocated for the page */
	if (sun3_get_segmap (vaddr&~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG)
		mmu_emu_map_pmeg (context, vaddr);

	/* Write the pte value to hardware MMU */
	sun3_put_pte (vaddr&PAGE_MASK, pte_val (*pte));

	/* Update software copy of the pte value */
// I'm not sure this is necessary. If this is required, we ought to simply
// copy this out when we reuse the PMEG or at some other convenient time.
// Doing it here is fairly meaningless, anyway, as we only know about the
// first access to a given page. --m
	if (!read_flag) {
		if (pte_val (*pte) & SUN3_PAGE_WRITEABLE)
			pte_val (*pte) |= (SUN3_PAGE_ACCESSED
					   | SUN3_PAGE_MODIFIED);
		else
			return 0;	/* Write-protect error. */
	} else
		pte_val (*pte) |= SUN3_PAGE_ACCESSED;

#ifdef DEBUG_MMU_EMU
	pr_info("seg:%ld crp:%p ->", get_fs().seg, crp);
	print_pte_vaddr (vaddr);
	pr_cont("\n");
#endif

	return 1;
}
linux-master
arch/m68k/sun3/mmu_emu.c
/*
 *  linux/arch/m68k/sun3/config.c
 *
 *  Copyright (C) 1996,1997 Pekka Pietik{inen
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/platform_device.h>

#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/contregs.h>
#include <asm/movs.h>
#include <asm/pgalloc.h>
#include <asm/sun3-head.h>
#include <asm/sun3mmu.h>
#include <asm/machdep.h>
#include <asm/machines.h>
#include <asm/idprom.h>
#include <asm/intersil.h>
#include <asm/irq.h>
#include <asm/sections.h>
#include <asm/sun3ints.h>

char sun3_reserved_pmeg[SUN3_PMEGS_NUM];

static void sun3_sched_init(void);

extern void sun3_get_model (char* model);
extern int sun3_hwclk(int set, struct rtc_time *t);

volatile char* clock_va;	/* Intersil clock chip base address */
extern unsigned long availmem;
unsigned long num_pages;

/* /proc/hardware hook: report the PROM revision string. */
static void sun3_get_hardware_list(struct seq_file *m)
{
	seq_printf(m, "PROM Revision:\t%s\n", romvec->pv_monid);
}

/*
 * Early Sun-3 setup: identify CPU/FPU/MMU, record the fixed hardware
 * addresses of the clock and interrupt registers, bring up the PROM
 * interface, enable the FPU via the system enable register, and mark
 * the PMEGs that must never be reallocated.
 */
void __init sun3_init(void)
{
	unsigned char enable_register;
	int i;

	m68k_machtype= MACH_SUN3;
	m68k_cputype = CPU_68020;
	m68k_fputype = FPU_68881; /* mc68881 actually */
	m68k_mmutype = MMU_SUN3;
	clock_va    =          (char *) 0xfe06000;	/* dark */
	sun3_intreg = (unsigned char *) 0xfe0a000;	/* magic */
	sun3_disable_interrupts();

	prom_init((void *)LINUX_OPPROM_BEGVM);

	GET_CONTROL_BYTE(AC_SENABLE,enable_register);
	enable_register |= 0x50; /* Enable FPU */
	SET_CONTROL_BYTE(AC_SENABLE,enable_register);
	GET_CONTROL_BYTE(AC_SENABLE,enable_register);

	/* This code looks suspicious, because it doesn't subtract
           memory belonging to the kernel from the available space */
	memset(sun3_reserved_pmeg, 0, sizeof(sun3_reserved_pmeg));

	/* Reserve important PMEGS */
	/* FIXME: These should be probed instead of hardcoded */

	for (i=0; i<8; i++)		/* Kernel PMEGs */
		sun3_reserved_pmeg[i] = 1;

	sun3_reserved_pmeg[247] = 1;	/* ROM mapping  */
	sun3_reserved_pmeg[248] = 1;	/* AMD Ethernet */
	sun3_reserved_pmeg[251] = 1;	/* VB area      */
	sun3_reserved_pmeg[254] = 1;	/* main I/O     */

	/* NOTE(review): 249/252/253 are reserved without a stated
	 * reason -- presumably more hardware windows; unverified. */
	sun3_reserved_pmeg[249] = 1;
	sun3_reserved_pmeg[252] = 1;
	sun3_reserved_pmeg[253] = 1;
	set_fc(USER_DATA);
}

/* Without this, Bad Things happen when something calls arch_reset. */
static void sun3_reboot (void)
{
	prom_reboot ("vmlinux");
}

static void sun3_halt (void)
{
	prom_halt ();
}

/* sun3 bootmem allocation */

/* Record the usable physical memory range in the generic m68k
 * bootmem variables (availmem, max_pfn, high_memory). */
static void __init sun3_bootmem_alloc(unsigned long memory_start,
				      unsigned long memory_end)
{
	unsigned long start_page;

	/* align start/end to page boundaries */
	memory_start = ((memory_start + (PAGE_SIZE-1)) & PAGE_MASK);
	memory_end = memory_end & PAGE_MASK;

	start_page = __pa(memory_start) >> PAGE_SHIFT;
	max_pfn = num_pages = __pa(memory_end) >> PAGE_SHIFT;

	high_memory = (void *)memory_end;
	availmem = memory_start;

	m68k_setup_node(0);
}

/*
 * Machine-description hook: wire up all mach_* vectors, read the
 * IDPROM, and size memory from the PROM's pv_sun3mem value.
 */
void __init config_sun3(void)
{
	unsigned long memory_start, memory_end;

	pr_info("ARCH: SUN3\n");
	idprom_init();

	/* Subtract kernel memory from available memory */

	mach_sched_init      =  sun3_sched_init;
	mach_init_IRQ        =  sun3_init_IRQ;
	mach_reset           =  sun3_reboot;
	mach_get_model	     =  sun3_get_model;
	mach_hwclk           =  sun3_hwclk;
	mach_halt	     =  sun3_halt;
	mach_get_hardware_list = sun3_get_hardware_list;

	memory_start = ((((unsigned long)_end) + 0x2000) & ~0x1fff);
// PROM seems to want the last couple of physical pages. --m
	memory_end   = *(romvec->pv_sun3mem) + PAGE_OFFSET - 2*PAGE_SIZE;

	m68k_num_memory=1;
	m68k_memory[0].size=*(romvec->pv_sun3mem);

	sun3_bootmem_alloc(memory_start, memory_end);
}

/*
 * Start the 100 Hz timer tick: program the Intersil clock for periodic
 * interrupts, then enable IRQ 5 and interrupts globally.  The sequence
 * (program with interrupts masked, clear, enable, clear again) mirrors
 * the handshaking done in the int5 handler -- keep the order as-is.
 */
static void __init sun3_sched_init(void)
{
	sun3_disable_interrupts();
	intersil_clock->cmd_reg=(INTERSIL_RUN|INTERSIL_INT_DISABLE|INTERSIL_24H_MODE);
	intersil_clock->int_reg=INTERSIL_HZ_100_MASK;
	intersil_clear();
	sun3_enable_irq(5);
	intersil_clock->cmd_reg=(INTERSIL_RUN|INTERSIL_INT_ENABLE|INTERSIL_24H_MODE);
	sun3_enable_interrupts();
	intersil_clear();
}

#if IS_ENABLED(CONFIG_SUN3_SCSI)

static const struct resource sun3_scsi_vme_rsrc[] __initconst = {
	{
		.flags = IORESOURCE_IRQ,
		.start = SUN3_VEC_VMESCSI0,
		.end   = SUN3_VEC_VMESCSI0,
	}, {
		.flags = IORESOURCE_MEM,
		.start = 0xff200000,
		.end   = 0xff200021,
	}, {
		.flags = IORESOURCE_IRQ,
		.start = SUN3_VEC_VMESCSI1,
		.end   = SUN3_VEC_VMESCSI1,
	}, {
		.flags = IORESOURCE_MEM,
		.start = 0xff204000,
		.end   = 0xff204021,
	},
};

/*
 * Int: level 2 autovector
 * IO: type 1, base 0x00140000, 5 bits phys space: A<4..0>
 */
static const struct resource sun3_scsi_rsrc[] __initconst = {
	{
		.flags = IORESOURCE_IRQ,
		.start = 2,
		.end   = 2,
	}, {
		.flags = IORESOURCE_MEM,
		.start = 0x00140000,
		.end   = 0x0014001f,
	},
};

/* Register the SCSI platform device matching the IDPROM machine type:
 * VME-attached controller on 3/160 and 3/260, onboard on 3/50 and 3/60.
 * Other models get no SCSI device. */
int __init sun3_platform_init(void)
{
	switch (idprom->id_machtype) {
	case SM_SUN3 | SM_3_160:
	case SM_SUN3 | SM_3_260:
		platform_device_register_simple("sun3_scsi_vme", -1,
						sun3_scsi_vme_rsrc,
						ARRAY_SIZE(sun3_scsi_vme_rsrc));
		break;
	case SM_SUN3 | SM_3_50:
	case SM_SUN3 | SM_3_60:
		platform_device_register_simple("sun3_scsi", -1,
						sun3_scsi_rsrc,
						ARRAY_SIZE(sun3_scsi_rsrc));
		break;
	}
	return 0;
}

arch_initcall(sun3_platform_init);

#endif
linux-master
arch/m68k/sun3/config.c
/* * arch/m68k/sun3/intersil.c * * basic routines for accessing the intersil clock within the sun3 machines * * started 11/12/1999 Sam Creasey * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/rtc.h> #include <asm/errno.h> #include <asm/intersil.h> #include <asm/machdep.h> /* bits to set for start/run of the intersil */ #define STOP_VAL (INTERSIL_STOP | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE) #define START_VAL (INTERSIL_RUN | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE) /* get/set hwclock */ int sun3_hwclk(int set, struct rtc_time *t) { volatile struct intersil_dt *todintersil; unsigned long flags; todintersil = (struct intersil_dt *) &intersil_clock->counter; local_irq_save(flags); intersil_clock->cmd_reg = STOP_VAL; /* set or read the clock */ if(set) { todintersil->csec = 0; todintersil->hour = t->tm_hour; todintersil->minute = t->tm_min; todintersil->second = t->tm_sec; todintersil->month = t->tm_mon + 1; todintersil->day = t->tm_mday; todintersil->year = (t->tm_year - 68) % 100; todintersil->weekday = t->tm_wday; } else { /* read clock */ t->tm_sec = todintersil->csec; t->tm_hour = todintersil->hour; t->tm_min = todintersil->minute; t->tm_sec = todintersil->second; t->tm_mon = todintersil->month - 1; t->tm_mday = todintersil->day; t->tm_year = todintersil->year + 68; t->tm_wday = todintersil->weekday; if (t->tm_year < 70) t->tm_year += 100; } intersil_clock->cmd_reg = START_VAL; local_irq_restore(flags); return 0; }
linux-master
arch/m68k/sun3/intersil.c
// SPDX-License-Identifier: GPL-2.0 #include <asm/contregs.h> #include <asm/sun3mmu.h> #include <asm/io.h> void sun3_leds(unsigned char byte) { unsigned char dfc; GET_DFC(dfc); SET_DFC(FC_CONTROL); SET_CONTROL_BYTE(AC_LEDS, byte); SET_DFC(dfc); }
linux-master
arch/m68k/sun3/leds.c
// SPDX-License-Identifier: GPL-2.0
/*
 * idprom.c: Routines to load the idprom into kernel addresses and
 *           interpret the data contained within.
 *
 * Copyright (C) 1995 David S. Miller ([email protected])
 * Sun3/3x models added by David Monro ([email protected])
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/string.h>

#include <asm/oplib.h>
#include <asm/idprom.h>
#include <asm/machines.h>  /* Fun with Sun released architectures. */

struct idprom *idprom;
EXPORT_SYMBOL(idprom);

static struct idprom idprom_buffer;

/* Here is the master table of Sun machines which use some implementation
 * of the Sparc CPU and have a meaningful IDPROM machtype value that we
 * know about.  See asm-sparc/machines.h for empirical constants.
 */
static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
/* First, Sun3's */
	{ .name = "Sun 3/160 Series",	.id_machtype = (SM_SUN3 | SM_3_160) },
	{ .name = "Sun 3/50",		.id_machtype = (SM_SUN3 | SM_3_50) },
	{ .name = "Sun 3/260 Series",	.id_machtype = (SM_SUN3 | SM_3_260) },
	{ .name = "Sun 3/110 Series",	.id_machtype = (SM_SUN3 | SM_3_110) },
	{ .name = "Sun 3/60",		.id_machtype = (SM_SUN3 | SM_3_60) },
	{ .name = "Sun 3/E",		.id_machtype = (SM_SUN3 | SM_3_E) },
/* Now, Sun3x's */
	{ .name = "Sun 3/460 Series",	.id_machtype = (SM_SUN3X | SM_3_460) },
	{ .name = "Sun 3/80",		.id_machtype = (SM_SUN3X | SM_3_80) },
/* Then, Sun4's */
// { .name = "Sun 4/100 Series", .id_machtype = (SM_SUN4 | SM_4_110) },
// { .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) },
// { .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) },
// { .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) },
/* And now, Sun4c's */
// { .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) },
// { .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) },
// { .name = "Sun4c SparcStation 1+", .id_machtype = (SM_SUN4C | SM_4C_SS1PLUS) },
// { .name = "Sun4c SparcStation SLC", .id_machtype = (SM_SUN4C | SM_4C_SLC) },
// { .name = "Sun4c SparcStation 2", .id_machtype = (SM_SUN4C | SM_4C_SS2) },
// { .name = "Sun4c SparcStation ELC", .id_machtype = (SM_SUN4C | SM_4C_ELC) },
// { .name = "Sun4c SparcStation IPX", .id_machtype = (SM_SUN4C | SM_4C_IPX) },
/* Finally, early Sun4m's */
// { .name = "Sun4m SparcSystem600", .id_machtype = (SM_SUN4M | SM_4M_SS60) },
// { .name = "Sun4m SparcStation10/20", .id_machtype = (SM_SUN4M | SM_4M_SS50) },
// { .name = "Sun4m SparcStation5", .id_machtype = (SM_SUN4M | SM_4M_SS40) },
/* One entry for the OBP arch's which are sun4d, sun4e, and newer sun4m's */
// { .name = "Sun4M OBP based system", .id_machtype = (SM_SUN4M_OBP | 0x0) }
};

/* Look up @machtype in the table and print its model name; unknown
 * values are fatal (prom_halt).  The SM_SUN4M_OBP branch is dead on
 * sun3 and its body is compiled out. */
static void __init display_system_type(unsigned char machtype)
{
	register int i;

	for (i = 0; i < NUM_SUN_MACHINES; i++) {
		if(Sun_Machines[i].id_machtype == machtype) {
			if (machtype != (SM_SUN4M_OBP | 0x00))
				pr_info("TYPE: %s\n", Sun_Machines[i].name);
			else {
#if 0
				char sysname[128];

				prom_getproperty(prom_root_node, "banner-name",
						 sysname, sizeof(sysname));
				pr_info("TYPE: %s\n", sysname);
#endif
			}
			return;
		}
	}

	prom_printf("IDPROM: Bogus id_machtype value, 0x%x\n", machtype);
	prom_halt();
}

/* Copy this machine's model name into @model (caller-sized buffer;
 * left untouched if the machtype is not in the table).
 * NOTE(review): declared here with unsigned char* while config.c
 * declares it extern with char* -- the prototypes should agree. */
void sun3_get_model(unsigned char* model)
{
	register int i;

	for (i = 0; i < NUM_SUN_MACHINES; i++) {
		if(Sun_Machines[i].id_machtype == idprom->id_machtype) {
			strcpy(model, Sun_Machines[i].name);
			return;
		}
	}
}

/* Calculate the IDPROM checksum (xor of the data bytes). */
static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
{
	unsigned char cksum, i, *ptr = (unsigned char *)idprom;

	/* XOR of bytes 0x00..0x0E -- the checksum byte itself follows */
	for (i = cksum = 0; i <= 0x0E; i++)
		cksum ^= *ptr++;

	return cksum;
}

/* Create a local IDPROM copy, verify integrity, and display information. */
void __init idprom_init(void)
{
	prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));

	idprom = &idprom_buffer;

	/* only format 0x01 is understood; anything else is fatal */
	if (idprom->id_format != 0x01)  {
		prom_printf("IDPROM: Unknown format type!\n");
		prom_halt();
	}

	if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
		prom_printf("IDPROM: Checksum failure (nvram=%x, calc=%x)!\n",
			    idprom->id_cksum, calc_idprom_cksum(idprom));
		prom_halt();
	}

	display_system_type(idprom->id_machtype);

	pr_info("Ethernet address: %pM\n", idprom->id_ethaddr);
}
linux-master
arch/m68k/sun3/idprom.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/m68k/sun3/dvma.c * * Written by Sam Creasey * * Sun3 IOMMU routines used for dvma accesses. * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/memblock.h> #include <linux/list.h> #include <asm/page.h> #include <asm/sun3mmu.h> #include <asm/dvma.h> static unsigned long ptelist[120]; static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr) { unsigned long pte; unsigned long j; pte_t ptep; j = *(volatile unsigned long *)kaddr; *(volatile unsigned long *)kaddr = j; ptep = pfn_pte(virt_to_pfn((void *)kaddr), PAGE_KERNEL); pte = pte_val(ptep); // pr_info("dvma_remap: addr %lx -> %lx pte %08lx\n", kaddr, vaddr, pte); if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) { sun3_put_pte(vaddr, pte); ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte; } return (vaddr + (kaddr & ~PAGE_MASK)); } int dvma_map_iommu(unsigned long kaddr, unsigned long baddr, int len) { unsigned long end; unsigned long vaddr; vaddr = dvma_btov(baddr); end = vaddr + len; while(vaddr < end) { dvma_page(kaddr, vaddr); kaddr += PAGE_SIZE; vaddr += PAGE_SIZE; } return 0; } void __init sun3_dvma_init(void) { memset(ptelist, 0, sizeof(ptelist)); }
linux-master
arch/m68k/sun3/dvma.c