path: root/trafgen_dev.h
#ifndef TRAFGEN_DEV_H
#define TRAFGEN_DEV_H

#include <stdbool.h>
#include <inttypes.h>
#include <stddef.h>	/* size_t, used in the I/O prototypes below */
#include <time.h>	/* struct timespec, used in the read prototypes below */

#include "pcap_io.h"

enum dev_io_mode_t {
	DEV_IO_IN	= 1 << 0,	/* device is opened for reading */
	DEV_IO_OUT	= 1 << 1,	/* device is opened for writing */
};

struct dev_io_ops;

struct dev_io {
	int fd;				/* underlying file descriptor */
	char *name;			/* device or file name */
	int ifindex;			/* interface index (network devices) */
	int dev_type;			/* backend type of this device */
	uint32_t link_type;		/* pcap link-layer type */
	uint32_t pcap_magic;		/* pcap file magic number */
	bool is_initialized;
	enum pcap_mode pcap_mode;	/* pcap read/write mode */

	const struct pcap_file_ops *pcap_ops;	/* pcap file format hooks */
	const struct dev_io_ops *ops;		/* backend I/O hooks */
};

struct dev_io_ops {
	int (*open)(struct dev_io *dev, const char *name, enum dev_io_mode_t mode);
	int (*write)(struct dev_io *dev, const uint8_t *buf, size_t len);
	int (*read)(struct dev_io *dev, uint8_t *buf, size_t len, struct timespec *tstamp);
	void (*close)(struct dev_io *dev);
};

extern struct dev_io *dev_io_open(const char *name, enum dev_io_mode_t mode);
extern int dev_io_write(struct dev_io *dev, const uint8_t *buf, size_t len);
extern int dev_io_read(struct dev_io *dev, uint8_t *buf, size_t len,
		       struct timespec *tstamp);
extern int dev_io_ifindex_get(struct dev_io *dev);
extern int dev_io_fd_get(struct dev_io *dev);
extern const char *dev_io_name_get(struct dev_io *dev);
extern void dev_io_link_type_set(struct dev_io *dev, int link_type);
extern bool dev_io_is_netdev(struct dev_io *dev);
extern bool dev_io_is_pcap(struct dev_io *dev);
extern void dev_io_close(struct dev_io *dev);

#endif /* TRAFGEN_DEV_H */
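
For illustration, here is a minimal sketch of how a backend might fill in a
struct dev_io_ops table. The backend below is hypothetical (a write-only sink
that discards packets); the real backends live elsewhere in trafgen, and the
convention that a NULL hook means "operation unsupported" is an assumption,
not something this header guarantees.

/* Hypothetical write-only backend; illustration only. */
#include <stddef.h>
#include <stdint.h>
#include "trafgen_dev.h"

static int null_open(struct dev_io *dev, const char *name,
		     enum dev_io_mode_t mode)
{
	(void)name;
	if (!(mode & DEV_IO_OUT))
		return -1;	/* output-only backend */
	dev->fd = -1;		/* no underlying file descriptor */
	return 0;
}

static int null_write(struct dev_io *dev, const uint8_t *buf, size_t len)
{
	(void)dev;
	(void)buf;
	return (int)len;	/* pretend everything was sent */
}

static void null_close(struct dev_io *dev)
{
	(void)dev;
}

static const struct dev_io_ops null_ops = {
	.open	= null_open,
	.write	= null_write,
	.read	= NULL,		/* assumed: NULL means unsupported */
	.close	= null_close,
};

And a hedged sketch of the caller side, assuming dev_io_open() returns NULL
on failure and dev_io_write() returns a negative value on error; neither
convention is guaranteed by this header alone:

#include <stdio.h>
#include <stdint.h>
#include "trafgen_dev.h"

int send_one(const uint8_t *pkt, size_t len)
{
	struct dev_io *dev;
	int ret;

	/* Assumed: NULL return signals failure. */
	dev = dev_io_open("eth0", DEV_IO_OUT);
	if (!dev)
		return -1;

	/* Assumed: negative return signals a write error. */
	ret = dev_io_write(dev, pkt, len);
	if (ret < 0)
		fprintf(stderr, "write to %s failed\n", dev_io_name_get(dev));

	dev_io_close(dev);
	return ret;
}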
…level cache. As a result, the scheduler fails to take advantage of cache locality while migrating tasks during load balancing.

These are the cpu masks currently present for sparc that are/can be used in scheduler domain construction:

cpu_core_map     : set based on the cores that share the L1 cache.
cpu_core_sib_map : set based on the socket id.

The prior SPARC notion of a socket was defined as the highest level of shared cache. However, the MD record on T7 platforms now describes the CPUs that share the physical socket, and this is no longer tied to shared cache. That is why a separate cpu mask needs to be created that truly represents the highest level of shared cache for all platforms.

Signed-off-by: Atish Patra <atish.patra@oracle.com>
Reviewed-by: Chris Hyser <chris.hyser@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
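
As a hedged sketch (not the actual arch/sparc change), one way to build such
a per-cpu "shares the highest-level cache" mask looks like the following.
The mask name cpu_llc_sib_map and the helper mdesc_llc_id() are hypothetical
stand-ins for the real machine-descriptor lookup:

#include <linux/init.h>
#include <linux/cpumask.h>

/* Hypothetical mask: CPUs sharing the highest-level cache with cpu i. */
static cpumask_t cpu_llc_sib_map[NR_CPUS];

static void __init setup_llc_sibling_masks(void)
{
	int i, j;

	/* Two CPUs are cache siblings iff the MD record gives them the
	 * same last-level-cache id (mdesc_llc_id() is hypothetical). */
	for_each_present_cpu(i)
		for_each_present_cpu(j)
			if (mdesc_llc_id(i) == mdesc_llc_id(j))
				cpumask_set_cpu(j, &cpu_llc_sib_map[i]);
}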