1
0
mirror of https://github.com/xdp-project/BNG-router.git synced 2024-05-06 15:54:53 +00:00

Import build config and DHCP relay code from bpf-examples

The dhcp-relay utility was initially implemented as part of the
bpf-examples repository, but really belongs here. So import it along with
the build environment from bpf-examples.

Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk>
This commit is contained in:
Toke Høiland-Jørgensen
2021-08-18 14:40:32 +02:00
parent 41020f7b61
commit d38fb6ef73
41 changed files with 10606 additions and 51 deletions

561
.clang-format Normal file
View File

@ -0,0 +1,561 @@
# SPDX-License-Identifier: GPL-2.0
#
# clang-format configuration file. Intended for clang-format >= 4.
#
# For more information, see:
#
# Documentation/process/clang-format.rst
# https://clang.llvm.org/docs/ClangFormat.html
# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
#
---
AccessModifierOffset: -4
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
#AlignEscapedNewlines: Left # Unknown to clang-format-4.0
AlignOperands: true
AlignTrailingComments: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: false
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: true
AfterNamespace: true
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
#AfterExternBlock: false # Unknown to clang-format-5.0
BeforeCatch: false
BeforeElse: false
IndentBraces: false
#SplitEmptyFunction: true # Unknown to clang-format-4.0
#SplitEmptyRecord: true # Unknown to clang-format-4.0
#SplitEmptyNamespace: true # Unknown to clang-format-4.0
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Custom
#BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0
BreakBeforeTernaryOperators: false
BreakConstructorInitializersBeforeComma: false
#BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: false
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
#CompactNamespaces: false # Unknown to clang-format-4.0
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 8
ContinuationIndentWidth: 8
Cpp11BracedListStyle: false
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
#FixNamespaceComments: false # Unknown to clang-format-4.0
# Taken from:
# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ \
# | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \
# | sort | uniq
ForEachMacros:
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
- '__ata_qc_for_each'
- 'ata_qc_for_each'
- 'ata_qc_for_each_raw'
- 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- '__bio_for_each_bvec'
- 'bio_for_each_bvec'
- 'bio_for_each_bvec_all'
- 'bio_for_each_integrity_vec'
- '__bio_for_each_segment'
- 'bio_for_each_segment'
- 'bio_for_each_segment_all'
- 'bio_list_for_each'
- 'bip_for_each_vec'
- 'bitmap_for_each_clear_region'
- 'bitmap_for_each_set_region'
- 'blkg_for_each_descendant_post'
- 'blkg_for_each_descendant_pre'
- 'blk_queue_for_each_rl'
- 'bond_for_each_slave'
- 'bond_for_each_slave_rcu'
- 'bpf_for_each_spilled_reg'
- 'btree_for_each_safe128'
- 'btree_for_each_safe32'
- 'btree_for_each_safe64'
- 'btree_for_each_safel'
- 'card_for_each_dev'
- 'cgroup_taskset_for_each'
- 'cgroup_taskset_for_each_leader'
- 'cpufreq_for_each_entry'
- 'cpufreq_for_each_entry_idx'
- 'cpufreq_for_each_valid_entry'
- 'cpufreq_for_each_valid_entry_idx'
- 'css_for_each_child'
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- 'device_for_each_child_node'
- 'displayid_iter_for_each'
- 'dma_fence_chain_for_each'
- 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'drm_atomic_for_each_plane_damage'
- 'drm_client_for_each_connector_iter'
- 'drm_client_for_each_modeset'
- 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_bridge_in_chain'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_crtc_reverse'
- 'drm_for_each_encoder'
- 'drm_for_each_encoder_mask'
- 'drm_for_each_fb'
- 'drm_for_each_legacy_plane'
- 'drm_for_each_plane'
- 'drm_for_each_plane_mask'
- 'drm_for_each_privobj'
- 'drm_mm_for_each_hole'
- 'drm_mm_for_each_node'
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'flow_action_for_each'
- 'for_each_acpi_dev_match'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
- 'for_each_aggr_pgid'
- 'for_each_available_child_of_node'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_bvec'
- 'for_each_card_auxs'
- 'for_each_card_auxs_safe'
- 'for_each_card_components'
- 'for_each_card_dapms'
- 'for_each_card_pre_auxs'
- 'for_each_card_prelinks'
- 'for_each_card_rtds'
- 'for_each_card_rtds_safe'
- 'for_each_card_widgets'
- 'for_each_card_widgets_safe'
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_cmsghdr'
- 'for_each_compatible_node'
- 'for_each_component_dais'
- 'for_each_component_dais_safe'
- 'for_each_comp_order'
- 'for_each_console'
- 'for_each_cpu'
- 'for_each_cpu_and'
- 'for_each_cpu_not'
- 'for_each_cpu_wrap'
- 'for_each_dapm_widgets'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
- 'for_each_dpcm_be_safe'
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- 'for_each_dtpm_table'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
- 'for_each_element_extid'
- 'for_each_element_id'
- 'for_each_endpoint_of_node'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
- 'for_each_free_mem_pfn_range_in_zone'
- 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
- 'for_each_func_rsrc'
- 'for_each_hstate'
- 'for_each_if'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_link_codecs'
- 'for_each_link_cpus'
- 'for_each_link_platforms'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- 'for_each_member'
- 'for_each_memcg_cache_index'
- 'for_each_mem_pfn_range'
- '__for_each_mem_range'
- 'for_each_mem_range'
- '__for_each_mem_range_rev'
- 'for_each_mem_range_rev'
- 'for_each_mem_region'
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
- 'for_each_msi_vector'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_netdev'
- 'for_each_netdev_continue'
- 'for_each_netdev_continue_rcu'
- 'for_each_netdev_continue_reverse'
- 'for_each_netdev_feature'
- 'for_each_netdev_in_bond_rcu'
- 'for_each_netdev_rcu'
- 'for_each_netdev_reverse'
- 'for_each_netdev_safe'
- 'for_each_net_rcu'
- 'for_each_new_connector_in_state'
- 'for_each_new_crtc_in_state'
- 'for_each_new_mst_mgr_in_state'
- 'for_each_new_plane_in_state'
- 'for_each_new_private_obj_in_state'
- 'for_each_node'
- 'for_each_node_by_name'
- 'for_each_node_by_type'
- 'for_each_node_mask'
- 'for_each_node_state'
- 'for_each_node_with_cpus'
- 'for_each_node_with_property'
- 'for_each_nonreserved_multicast_dest_pgid'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_old_mst_mgr_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_mst_mgr_in_state'
- 'for_each_oldnew_plane_in_state'
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_old_plane_in_state'
- 'for_each_old_private_obj_in_state'
- 'for_each_online_cpu'
- 'for_each_online_node'
- 'for_each_online_pgdat'
- 'for_each_pci_bridge'
- 'for_each_pci_dev'
- 'for_each_pci_msi_entry'
- 'for_each_pcm_streams'
- 'for_each_physmem_range'
- 'for_each_populated_zone'
- 'for_each_possible_cpu'
- 'for_each_present_cpu'
- 'for_each_prime_number'
- 'for_each_prime_number_from'
- 'for_each_process'
- 'for_each_process_thread'
- 'for_each_prop_codec_conf'
- 'for_each_prop_dai_codec'
- 'for_each_prop_dai_cpu'
- 'for_each_prop_dlc_codecs'
- 'for_each_prop_dlc_cpus'
- 'for_each_prop_dlc_platforms'
- 'for_each_property_of_node'
- 'for_each_registered_fb'
- 'for_each_requested_gpio'
- 'for_each_requested_gpio_in_range'
- 'for_each_reserved_mem_range'
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dais'
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- 'for_each_rtd_dais'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_set_clump8'
- 'for_each_sg'
- 'for_each_sg_dma_page'
- 'for_each_sg_page'
- 'for_each_sgtable_dma_page'
- 'for_each_sgtable_dma_sg'
- 'for_each_sgtable_page'
- 'for_each_sgtable_sg'
- 'for_each_sibling_event'
- 'for_each_subelement'
- 'for_each_subelement_extid'
- 'for_each_subelement_id'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_unicast_dest_pgid'
- 'for_each_vsi'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
- 'for_each_zone_zonelist_nodemask'
- 'fwnode_for_each_available_child_node'
- 'fwnode_for_each_child_node'
- 'fwnode_graph_for_each_endpoint'
- 'gadget_for_each_ep'
- 'genradix_for_each'
- 'genradix_for_each_from'
- 'hash_for_each'
- 'hash_for_each_possible'
- 'hash_for_each_possible_rcu'
- 'hash_for_each_possible_rcu_notrace'
- 'hash_for_each_possible_safe'
- 'hash_for_each_rcu'
- 'hash_for_each_safe'
- 'hctx_for_each_ctx'
- 'hlist_bl_for_each_entry'
- 'hlist_bl_for_each_entry_rcu'
- 'hlist_bl_for_each_entry_safe'
- 'hlist_for_each'
- 'hlist_for_each_entry'
- 'hlist_for_each_entry_continue'
- 'hlist_for_each_entry_continue_rcu'
- 'hlist_for_each_entry_continue_rcu_bh'
- 'hlist_for_each_entry_from'
- 'hlist_for_each_entry_from_rcu'
- 'hlist_for_each_entry_rcu'
- 'hlist_for_each_entry_rcu_bh'
- 'hlist_for_each_entry_rcu_notrace'
- 'hlist_for_each_entry_safe'
- 'hlist_for_each_entry_srcu'
- '__hlist_for_each_rcu'
- 'hlist_for_each_safe'
- 'hlist_nulls_for_each_entry'
- 'hlist_nulls_for_each_entry_from'
- 'hlist_nulls_for_each_entry_rcu'
- 'hlist_nulls_for_each_entry_safe'
- 'i3c_bus_for_each_i2cdev'
- 'i3c_bus_for_each_i3cdev'
- 'ide_host_for_each_port'
- 'ide_port_for_each_dev'
- 'ide_port_for_each_present_dev'
- 'idr_for_each_entry'
- 'idr_for_each_entry_continue'
- 'idr_for_each_entry_continue_ul'
- 'idr_for_each_entry_ul'
- 'in_dev_for_each_ifa_rcu'
- 'in_dev_for_each_ifa_rtnl'
- 'inet_bind_bucket_for_each'
- 'inet_lhash2_for_each_icsk_rcu'
- 'key_for_each'
- 'key_for_each_safe'
- 'klp_for_each_func'
- 'klp_for_each_func_safe'
- 'klp_for_each_func_static'
- 'klp_for_each_object'
- 'klp_for_each_object_safe'
- 'klp_for_each_object_static'
- 'kunit_suite_for_each_test_case'
- 'kvm_for_each_memslot'
- 'kvm_for_each_vcpu'
- 'list_for_each'
- 'list_for_each_codec'
- 'list_for_each_codec_safe'
- 'list_for_each_continue'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
- 'list_for_each_entry_continue_reverse'
- 'list_for_each_entry_from'
- 'list_for_each_entry_from_rcu'
- 'list_for_each_entry_from_reverse'
- 'list_for_each_entry_lockless'
- 'list_for_each_entry_rcu'
- 'list_for_each_entry_reverse'
- 'list_for_each_entry_safe'
- 'list_for_each_entry_safe_continue'
- 'list_for_each_entry_safe_from'
- 'list_for_each_entry_safe_reverse'
- 'list_for_each_entry_srcu'
- 'list_for_each_prev'
- 'list_for_each_prev_safe'
- 'list_for_each_safe'
- 'llist_for_each'
- 'llist_for_each_entry'
- 'llist_for_each_entry_safe'
- 'llist_for_each_safe'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
- 'netdev_for_each_mc_addr'
- 'netdev_for_each_uc_addr'
- 'netdev_for_each_upper_dev_rcu'
- 'netdev_hw_addr_list_for_each'
- 'nft_rule_for_each_expr'
- 'nla_for_each_attr'
- 'nla_for_each_nested'
- 'nlmsg_for_each_attr'
- 'nlmsg_for_each_msg'
- 'nr_neigh_for_each'
- 'nr_neigh_for_each_safe'
- 'nr_node_for_each'
- 'nr_node_for_each_safe'
- 'of_for_each_phandle'
- 'of_property_for_each_string'
- 'of_property_for_each_u32'
- 'pci_bus_for_each_resource'
- 'pcl_for_each_chunk'
- 'pcl_for_each_segment'
- 'pcm_for_each_format'
- 'ping_portaddr_for_each_entry'
- 'plist_for_each'
- 'plist_for_each_continue'
- 'plist_for_each_entry'
- 'plist_for_each_entry_continue'
- 'plist_for_each_entry_safe'
- 'plist_for_each_safe'
- 'pnp_for_each_card'
- 'pnp_for_each_dev'
- 'protocol_for_each_card'
- 'protocol_for_each_dev'
- 'queue_for_each_hw_ctx'
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
- 'rb_for_each'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'rdma_umem_for_each_dma_block'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
- 'rhl_for_each_rcu'
- 'rht_for_each'
- 'rht_for_each_entry'
- 'rht_for_each_entry_from'
- 'rht_for_each_entry_rcu'
- 'rht_for_each_entry_rcu_from'
- 'rht_for_each_entry_safe'
- 'rht_for_each_from'
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_from'
- '__rq_for_each_bio'
- 'rq_for_each_bvec'
- 'rq_for_each_segment'
- 'scsi_for_each_prot_sg'
- 'scsi_for_each_sg'
- 'sctp_for_each_hentry'
- 'sctp_skb_for_each'
- 'shdma_for_each_chan'
- '__shost_for_each_device'
- 'shost_for_each_device'
- 'sk_for_each'
- 'sk_for_each_bound'
- 'sk_for_each_entry_offset_rcu'
- 'sk_for_each_from'
- 'sk_for_each_rcu'
- 'sk_for_each_safe'
- 'sk_nulls_for_each'
- 'sk_nulls_for_each_from'
- 'sk_nulls_for_each_rcu'
- 'snd_array_for_each'
- 'snd_pcm_group_for_each_entry'
- 'snd_soc_dapm_widget_for_each_path'
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
- 'tb_property_for_each'
- 'tcf_exts_for_each_action'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
- 'v4l2_device_for_each_subdev'
- 'v4l2_m2m_for_each_dst_buf'
- 'v4l2_m2m_for_each_dst_buf_safe'
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
- 'while_for_each_ftrace_op'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_range'
- 'xa_for_each_start'
- 'xas_for_each'
- 'xas_for_each_conflict'
- 'xas_for_each_marked'
- 'xbc_array_for_each_value'
- 'xbc_for_each_key_value'
- 'xbc_node_for_each_array_value'
- 'xbc_node_for_each_child'
- 'xbc_node_for_each_key_value'
- 'zorro_for_each_dev'
#IncludeBlocks: Preserve # Unknown to clang-format-5.0
IncludeCategories:
- Regex: '.*'
Priority: 1
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: false
#IndentPPDirectives: None # Unknown to clang-format-5.0
IndentWidth: 8
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
#ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0
ObjCBlockIndentWidth: 8
ObjCSpaceAfterProperty: true
ObjCSpaceBeforeProtocolList: true
# Taken from git's rules
#PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0
PenaltyBreakBeforeFirstCallParameter: 30
PenaltyBreakComment: 10
PenaltyBreakFirstLessLess: 0
PenaltyBreakString: 10
PenaltyExcessCharacter: 100
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: false
SortIncludes: false
#SortUsingDeclarations: false # Unknown to clang-format-4.0
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
#SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0
#SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0
SpaceBeforeParens: ControlStatements
#SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp03
TabWidth: 8
UseTab: Always
...

55
.gitignore vendored
View File

@ -1,52 +1,5 @@
# Prerequisites
*.d
# Object files
config.mk
.ccls-cache
compile_commands.json
*.ll
*.o
*.ko
*.obj
*.elf
# Linker output
*.ilk
*.map
*.exp
# Precompiled Headers
*.gch
*.pch
# Libraries
*.lib
*.a
*.la
*.lo
# Shared objects (inc. Windows DLLs)
*.dll
*.so
*.so.*
*.dylib
# Executables
*.exe
*.out
*.app
*.i*86
*.x86_64
*.hex
# Debug files
*.dSYM/
*.su
*.idb
*.pdb
# Kernel Module Compile Results
*.mod*
*.cmd
.tmp_versions/
modules.order
Module.symvers
Mkfile.old
dkms.conf

3
.gitmodules vendored Normal file
View File

@ -0,0 +1,3 @@
[submodule "lib/libbpf"]
path = lib/libbpf
url = https://github.com/xdp-project/libbpf

63
Makefile Normal file
View File

@ -0,0 +1,63 @@
# SPDX-License-Identifier: GPL-2.0
# Top level Makefile for bpf-examples

# Verbosity control: 'make V=1' echoes full commands, default build is quiet.
ifeq ("$(origin V)", "command line")
VERBOSE = $(V)
endif
ifndef VERBOSE
VERBOSE = 0
endif
ifeq ($(VERBOSE),0)
MAKEFLAGS += --no-print-directory
Q = @
endif

SUBDIRS := dhcp-relay

# 'all' is phony too, so a stray file named 'all' cannot mask the target.
.PHONY: all check_submodule help clobber distclean clean $(SUBDIRS)

all: lib $(SUBDIRS)

# The library must be configured and in sync before the subdirs build.
lib: config.mk check_submodule
	@echo; echo $@; $(MAKE) -C $@

$(SUBDIRS):
	@echo; echo $@; $(MAKE) -C $@

help:
	@echo "Make Targets:"
	@echo " all - build binaries"
	@echo " clean - remove products of build"
	@echo " distclean - remove configuration and build"
	@echo " install - install binaries on local machine"
	@echo " test - run test suite"
	@echo " archive - create tarball of all sources"
	@echo ""
	@echo "Make Arguments:"
	@echo " V=[0|1] - set build verbosity level"

# Re-run the configure script whenever it changes.
config.mk: configure
	sh configure

# Warn (but do not fail) when the libbpf submodule SHA-1 is out of sync.
# The backtick substitution is deliberate: it expands to nothing, so the
# command's exit status becomes that of the 'grep -q' inside it.
# (Fixed: the 'fi' previously carried a trailing backslash, continuing the
# recipe line into the next rule.)
check_submodule:
	@if [ -d .git ] && `git submodule status lib/libbpf | grep -q '^+'`; then \
		echo "" ;\
		echo "** WARNING **: git submodule SHA-1 out-of-sync" ;\
		echo " consider running: git submodule update" ;\
		echo "" ;\
	fi

# Force config.mk to exist so the recursive 'clean' can run, then remove
# generated configuration and index files.
clobber:
	touch config.mk
	$(MAKE) clean
	rm -f config.mk cscope.* compile_commands.json

distclean: clobber

clean: check_submodule
	$(Q)for i in $(SUBDIRS); \
	do $(MAKE) -C $$i clean; done
	$(Q)$(MAKE) -C lib clean

# Regenerate compile_commands.json with compiledb (implies a full rebuild).
compile_commands.json: clean
	compiledb make V=1

210
configure vendored Executable file
View File

@ -0,0 +1,210 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# This is not an autoconf generated configure
#

# Final output file which is input to Makefile
CONFIG_FINAL=config.mk
# Temporary file that every check appends to; it is renamed to
# $CONFIG_FINAL at the end of a successful run.  Derive the name from
# CONFIG_FINAL — the old ".${CONFIG}.tmp" referenced CONFIG before it was
# set and therefore expanded to the opaque name "..tmp".
CONFIG=".${CONFIG_FINAL}.tmp"

# Make a temp directory in build tree for compile tests; clean up both the
# directory and the unfinished config on any exit or signal.
TMPDIR=$(mktemp -d config.XXXXXX)
trap 'status=$?; rm -rf $TMPDIR; rm -f $CONFIG; exit $status' EXIT HUP INT QUIT TERM
# Pick the build tools, honouring pre-set environment overrides, verify
# that each one is available in PATH, and record the selection in $CONFIG.
# Exits non-zero if any tool is missing.
check_toolchain()
{
	# ':=' also replaces set-but-empty values (the old ${CC=gcc} left an
	# empty CC alone); kept consistent across all four tools.
	: "${PKG_CONFIG:=pkg-config}"
	: "${CC:=gcc}"
	: "${CLANG:=clang}"
	: "${LLC:=llc}"

	# Intentionally unquoted so multi-word settings (e.g. CC="ccache gcc")
	# are checked word by word.
	for TOOL in $PKG_CONFIG $CC $CLANG $LLC; do
		# 'command -v' with its status checked directly; the previous
		# '[ ! $(command -v ...) ]' relied on word-splitting the output.
		if ! command -v "$TOOL" >/dev/null 2>&1; then
			echo "*** ERROR: Cannot find tool ${TOOL}"
			exit 1
		fi
	done

	echo "PKG_CONFIG:=${PKG_CONFIG}" >>$CONFIG
	echo "CC:=${CC}" >>$CONFIG
	echo "CLANG:=${CLANG}" >>$CONFIG
	echo "LLC:=${LLC}" >>$CONFIG
}
# Probe for libelf via pkg-config.  On success print "yes" on stdout and
# append HAVE_ELF plus the compiler/linker flags to $CONFIG; on failure
# print a diagnostic and return non-zero (libelf is mandatory).
check_elf()
{
	if ! ${PKG_CONFIG} libelf --exists; then
		echo "missing - this is required"
		return 1
	fi

	echo "HAVE_ELF:=y" >>$CONFIG
	echo "yes"
	echo 'CFLAGS += -DHAVE_ELF' $(${PKG_CONFIG} libelf --cflags) >>$CONFIG
	echo 'LDLIBS += ' $(${PKG_CONFIG} libelf --libs) >>$CONFIG
}
# Probe for zlib via pkg-config.  Mirrors check_elf(): print "yes" and
# record HAVE_ZLIB plus flags in $CONFIG on success, otherwise report the
# missing (required) dependency and return non-zero.
check_zlib()
{
	if ! ${PKG_CONFIG} zlib --exists; then
		echo "missing - this is required"
		return 1
	fi

	echo "HAVE_ZLIB:=y" >>$CONFIG
	echo "yes"
	echo 'CFLAGS += -DHAVE_ZLIB' $(${PKG_CONFIG} zlib --cflags) >>$CONFIG
	echo 'LDLIBS += ' $(${PKG_CONFIG} zlib --libs) >>$CONFIG
}
# Locate a usable libbpf.  Prefers a system library (found via pkg-config,
# or via $LIBBPF_DIR for a custom install) unless FORCE_SUBMODULE_LIBBPF=1;
# otherwise falls back to building the lib/libbpf git submodule statically.
# Appends the resulting compiler/linker settings to $CONFIG and prints
# "system" or "submodule" for the configure summary line.
# NOTE(review): 'local' is not strictly POSIX, but is supported by dash/bash.
check_libbpf()
{
local libbpf_err
if [ "${FORCE_SUBMODULE_LIBBPF:-0}" -ne "1" ] && ${PKG_CONFIG} libbpf --exists || [ -n "$LIBBPF_DIR" ]; then
# An explicit LIBBPF_DIR wins over whatever pkg-config found.
if [ -n "$LIBBPF_DIR" ]; then
LIBBPF_CFLAGS="-I${LIBBPF_DIR}/include -L${LIBBPF_DIR}/lib"
LIBBPF_LDLIBS="-lbpf"
else
LIBBPF_CFLAGS=$(${PKG_CONFIG} libbpf --cflags)
LIBBPF_LDLIBS=$(${PKG_CONFIG} libbpf --libs)
fi
# Compile-and-link a probe using each libbpf API this tree needs, so
# too-old system libraries are rejected up front.
cat >$TMPDIR/libbpftest.c <<EOF
#include <bpf/libbpf.h>
int main(int argc, char **argv) {
void *ptr;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, .pin_root_path = "/path");
DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, lopts, .old_fd = -1);
(void) bpf_object__open_file("file", &opts);
(void) bpf_program__name(ptr);
(void) bpf_map__set_initial_value(ptr, ptr, 0);
(void) bpf_set_link_xdp_fd_opts(0, 0, 0, &lopts);
(void) bpf_tc_attach(ptr, ptr);
return 0;
}
EOF
libbpf_err=$($CC -o $TMPDIR/libbpftest $TMPDIR/libbpftest.c $LIBBPF_CFLAGS -lbpf 2>&1)
if [ "$?" -eq "0" ]; then
echo "SYSTEM_LIBBPF:=y" >>$CONFIG
echo 'CFLAGS += ' $LIBBPF_CFLAGS >> $CONFIG
echo 'LDLIBS += ' $LIBBPF_LDLIBS >>$CONFIG
# Empty OBJECT_LIBBPF: no extra build dependency for a system library.
echo 'OBJECT_LIBBPF = ' >>$CONFIG
echo system
return 0
fi
else
libbpf_err="${PKG_CONFIG} couldn't find libbpf"
fi
# A system libbpf was explicitly required but none is usable: abort, and
# remove the half-written config so the next run starts clean.
if [ "${FORCE_SYSTEM_LIBBPF:-0}" -eq "1" ]; then
echo "FORCE_SYSTEM_LIBBPF is set, but no usable libbpf found on system"
echo "error: $libbpf_err"
rm -f "$CONFIG"
exit 1
fi
# Fall back to the bundled submodule, linked statically via -l:libbpf.a.
echo submodule
echo "SYSTEM_LIBBPF:=n" >> $CONFIG
echo 'CFLAGS += -I$(LIB_DIR)/libbpf-install/usr/include' >>$CONFIG
echo 'BPF_CFLAGS += -I$(LIB_DIR)/libbpf-install/usr/include' >>$CONFIG
echo 'LDFLAGS += -L$(LIB_DIR)/libbpf/src' >>$CONFIG
echo 'LDLIBS += -l:libbpf.a' >>$CONFIG
echo 'OBJECT_LIBBPF = $(LIB_DIR)/libbpf/src/libbpf.a' >>$CONFIG
# Fetch the submodule on first use when running from a git checkout.
if ! [ -d "lib/libbpf/src" ] && [ -f ".gitmodules" ] && [ -e ".git" ]; then
git submodule init && git submodule update
fi
# Building the submodule also requires libelf and zlib.
echo -n "ELF support: "
check_elf || exit 1
echo -n "zlib support: "
check_zlib || exit 1
# For the built submodule library we know it supports this API, so we
# hard-code it; we also cannot build a test app for it, as libbpf.a has
# not been built yet at configure time.
echo "HAVE_LIBBPF_PERF_BUFFER__CONSUME:=y" >>"$CONFIG"
}
# Verify that clang can compile a BPF program that includes <errno.h>.
# Exits configure with distro-specific installation hints if the required
# 32-bit userspace stub headers are missing.
check_bpf_use_errno()
{
local compile_err
# Clang BPF-progs when compiled with proper -target bpf cause
# build dependencies to include the <gnu/stubs-32.h> file.
#
cat >$TMPDIR/bpf_use_errno_test.c <<EOF
#include <errno.h>
int dummy(void *ctx) { return 0; }
EOF
compile_err=$($CLANG -target bpf -c $TMPDIR/bpf_use_errno_test.c 2>&1)
# $? is the exit status of the clang invocation captured above.
if [ "$?" -ne "0" ]; then
echo "*** ERROR - Clang BPF-prog cannot include <errno.h>"
echo " - Install missing userspace header file"
echo ""
echo "Compile error: $compile_err"
echo ""
echo " On Fedora install:"
echo " dnf install glibc-devel.i686"
echo " On Debian install:"
echo " apt install libc6-dev-i386"
echo ""
exit 1
fi
}
# Emit the verbosity boilerplate that is prepended to config.mk, giving the
# generated makefile kernel-style V=0/V=1 output control.  The heredoc is
# unquoted, so Make's '$' characters are escaped as '\$' below to survive
# shell expansion unchanged.
quiet_config()
{
cat <<EOF
# user can control verbosity similar to kernel builds (e.g., V=1)
ifeq ("\$(origin V)", "command line")
VERBOSE = \$(V)
endif
ifndef VERBOSE
VERBOSE = 0
endif
ifeq (\$(VERBOSE),1)
Q =
else
Q = @
endif
ifeq (\$(VERBOSE),0)
MAKEFLAGS += --no-print-directory
endif
ifeq (\$(VERBOSE), 0)
QUIET_CC = @echo ' CC '\$@;
QUIET_CLANG = @echo ' CLANG '\$@;
QUIET_LLC = @echo ' LLC '\$@;
QUIET_LINK = @echo ' LINK '\$@;
QUIET_INSTALL = @echo ' INSTALL '\$@;
QUIET_GEN = @echo ' GEN '\$@;
endif
EOF
}
# Main sequence: write the header and verbosity boilerplate, then run each
# feature check (every check appends its findings to $CONFIG).
echo "# Generated config" >$CONFIG
quiet_config >> $CONFIG
check_toolchain
# NOTE(review): 'echo -n' is not portable to every /bin/sh — confirm the
# target platforms before relying on it.
echo -n "libbpf support: "
check_libbpf
check_bpf_use_errno
# Optional override: point both userspace and BPF builds at a specific set
# of kernel headers.
if [ -n "$KERNEL_HEADERS" ]; then
echo "kernel headers: $KERNEL_HEADERS"
echo "CFLAGS += -I$KERNEL_HEADERS" >>$CONFIG
echo "BPF_CFLAGS += -I$KERNEL_HEADERS" >>$CONFIG
fi
# Atomically publish the finished configuration; a failed run never leaves
# a (partial) config.mk behind because the trap removes $CONFIG.
mv $CONFIG $CONFIG_FINAL

3
dhcp-relay/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
*.ll
*.o
dhcp_user_xdp

10
dhcp-relay/Makefile Normal file
View File

@ -0,0 +1,10 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)

# Userspace loader binary and BPF object built from this directory.
USER_TARGETS := dhcp_user_xdp
BPF_TARGETS := dhcp_kern_xdp

# Both targets are rebuilt whenever the shared header changes.
EXTRA_DEPS := dhcp-relay.h
#EXTRA_CFLAGS := $(if $(IPV6),-DIPV6)

# All actual build rules come from the shared library makefile.
LIB_DIR = ../lib
include $(LIB_DIR)/common.mk

25
dhcp-relay/README Normal file
View File

@ -0,0 +1,25 @@
Usage
-----
dhcp_user_xdp takes a network interface and the DHCP relay server IP
as inputs and stores the server IP in a BPF map. The XDP program then filters
incoming DHCP requests, inserts option 82 into the DHCP request packets, and
overwrites the destination IP with that of the DHCP relay server.
Build instructions:
cd bpf-examples/dhcp-relay
make
Loading bpf program:
sudo ./dhcp_user_xdp -i <netif> -d <dhcp relay IP>
where,
netif: Ingress network interface name
unloading program:
sudo ./dhcp_user_xdp -i <netif> -u
To run in SKB mode:
add option "-m skb" for both load and unload commands
Verify using tcpdump:
sudo tcpdump -s 0 -i <netif> port 67 and port 68 -vvv

52
dhcp-relay/dhcp-relay.h Normal file
View File

@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */

/* Shared definitions for the DHCP relay XDP program and its userspace
 * loader.
 *
 * NOTE(review): this header uses struct in_addr without including a header
 * that defines it; the includer must provide it (the BPF program pulls it
 * in via <linux/in.h>) — confirm before reusing elsewhere.
 */
#include <linux/bpf.h>
#include <linux/types.h>

#define XDP_PROG_SEC "xdp"

#define DHCP_SNAME_LEN 64
#define DHCP_FILE_LEN 128

/* DHCP relay agent information option (RFC 3046) and its sub-options */
#define DHO_DHCP_AGENT_OPTIONS 82
#define RAI_CIRCUIT_ID 1
#define RAI_REMOTE_ID 2
#define RAI_OPTION_LEN 2

#define DEST_PORT 67 /* UDP destination port for dhcp */
#define MAX_BYTES 280 /* Max bytes supported by xdp load/store apis */

/* One sub-option inside option 82: code, payload length, 16-bit payload */
struct sub_option {
	__u8 option_id;
	__u8 len;
	__u16 val;
};

/* Option 82 as inserted by the XDP program: option code, total payload
 * length, then the circuit-id and remote-id sub-options (10 bytes total).
 */
struct dhcp_option_82 {
	__u8 t;
	__u8 len;
	struct sub_option circuit_id;
	struct sub_option remote_id;
};

/* Fixed-size portion of a DHCP packet; the numbers in the field comments
 * are the byte offsets from the start of the structure. */
struct dhcp_packet {
	__u8 op;			/* 0: Message opcode/type */
	__u8 htype;			/* 1: Hardware addr type (net/if_types.h) */
	__u8 hlen;			/* 2: Hardware addr length */
	__u8 hops;			/* 3: Number of relay agent hops from client */
	__u32 xid;			/* 4: Transaction ID */
	__u16 secs;			/* 8: Seconds since client started looking */
	__u16 flags;			/* 10: Flag bits */
	struct in_addr ciaddr;		/* 12: Client IP address (if already in use) */
	struct in_addr yiaddr;		/* 16: Client IP address */
	struct in_addr siaddr;		/* 20: IP address of next server to talk to */
	struct in_addr giaddr;		/* 24: DHCP relay agent IP address */
	unsigned char chaddr[16];	/* 28: Client hardware address */
	char sname[DHCP_SNAME_LEN];	/* 44: Server name */
	char file[DHCP_FILE_LEN];	/* 108: Boot filename */
	__u32 cookie;			/* 236: Magic cookie */
	unsigned char options[0];	/* 240: Optional parameters
					   (actual length dependent on MTU). */
};

183
dhcp-relay/dhcp_kern_xdp.c Normal file
View File

@ -0,0 +1,183 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <linux/bpf.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <xdp/parsing_helpers.h>
#include <xdp/context_helpers.h>
#include "dhcp-relay.h"
/*
 * This map is for storing the DHCP relay server
 * IP address configured by user. It is received
 * as an argument by user program.
 * Single-entry array: key 0 holds the IPv4 address, stored exactly as it
 * will be written into ip->daddr (i.e. network byte order).
 */
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, __u32);
__uint(max_entries, 1);
} dhcp_server SEC(".maps");
/* Inserts DHCP option 82 into the received dhcp packet
* at the specified offset.
*/
static __always_inline int write_dhcp_option(void *ctx, int offset,
struct collect_vlans *vlans)
{
struct dhcp_option_82 option;
option.t = DHO_DHCP_AGENT_OPTIONS;
option.len = 8;
option.circuit_id.option_id = RAI_CIRCUIT_ID;
option.circuit_id.len = RAI_OPTION_LEN;
option.circuit_id.val = bpf_htons(vlans->id[0]);
option.remote_id.option_id = RAI_REMOTE_ID;
option.remote_id.len = RAI_OPTION_LEN;
option.remote_id.val = bpf_htons(vlans->id[1]);
return xdp_store_bytes(ctx, offset, &option, sizeof(option), 0);
}
/* Recompute the IPv4 header checksum after the header was modified.
 * @oldip: copy of the header taken before modification
 * @ip: the modified header
 * @oldcsum: checksum field of the old header
 * Returns the folded 16-bit sum; the caller stores its complement.
 */
static __always_inline int calc_ip_csum(struct iphdr *oldip, struct iphdr *ip,
					__u32 oldcsum)
{
	const __u32 hdr_sz = sizeof(struct iphdr);
	__u32 diff, folded;

	/* Accumulated 32-bit difference between old and new header words */
	diff = bpf_csum_diff((__be32 *)oldip, hdr_sz, (__be32 *)ip, hdr_sz,
			     ~oldcsum);

	/* Fold the carry bits down into 16 bits */
	folded = (diff >> 16) + (diff & 0xffff);
	folded += folded >> 16;

	return folded;
}
/* Offset to DHCP Options part of the packet */
/* (Ethernet + IPv4 + UDP header sizes plus the fixed part of the DHCP
 * payload.  NOTE(review): the expansion is not parenthesised, so use it
 * only where operator precedence cannot change its value.) */
#define static_offset \
sizeof(struct ethhdr) + sizeof(struct iphdr) + sizeof(struct udphdr) + \
offsetof(struct dhcp_packet, options)
/* Delta value to be adjusted at xdp head*/
/* (bytes of extra headroom needed for the inserted option 82) */
#define delta sizeof(struct dhcp_option_82)
/* buf needs to be a static global var because the verifier won't allow
 * unaligned stack accesses
 */
static __u8 buf[static_offset + VLAN_MAX_DEPTH * sizeof(struct vlan_hdr)];
/* XDP program for parsing the DHCP packet and inserting the option 82*/
/* Flow: parse Ethernet + VLAN tags; require IPv4/UDP to port 67 carrying
 * at least two VLAN tags; copy the leading headers into the static buffer;
 * grow the headroom by sizeof(struct dhcp_option_82); write the saved
 * bytes back; append option 82 built from the VLAN ids; finally rewrite
 * the destination IP to the configured relay server and fix the IP
 * checksum.  Returns XDP_PASS (possibly with a rewritten packet) or
 * XDP_ABORTED on parse/helper failure.
 *
 * NOTE(review): the UDP checksum is not recalculated even though the
 * payload and destination IP change — confirm receivers tolerate this.
 */
SEC(XDP_PROG_SEC)
int xdp_dhcp_relay(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct collect_vlans vlans = { 0 };
struct ethhdr *eth;
struct iphdr *ip;
struct iphdr oldip;
struct udphdr *udp;
__u32 *dhcp_srv;
int rc = XDP_PASS;
__u16 offset = static_offset;
__u16 ip_offset = 0;
int i = 0;
/* These keep track of the next header type and iterator pointer */
struct hdr_cursor nh;
int ether_type;
int h_proto = 0;
int key = 0;
int len = 0;
/* packet must contain at least one byte */
if (data + 1 > data_end)
return XDP_ABORTED;
nh.pos = data;
ether_type = parse_ethhdr_vlan(&nh, data_end, &eth, &vlans);
/* check for valid ether type */
if (ether_type < 0) {
rc = XDP_ABORTED;
goto out;
}
if (ether_type != bpf_htons(ETH_P_IP))
goto out;
/* Check at least two vlan tags are present */
if (vlans.id[1] == 0)
goto out;
/* Read dhcp relay server IP from map */
dhcp_srv = bpf_map_lookup_elem(&dhcp_server, &key);
if (dhcp_srv == NULL)
goto out;
h_proto = parse_iphdr(&nh, data_end, &ip);
/* only handle fixed-size IP header due to static copy */
if (h_proto != IPPROTO_UDP || ip->ihl > 5) {
goto out;
}
/*old ip hdr backup for re-calculating the checksum later*/
oldip = *ip;
/* byte offset of the IP header from packet start; the 0x3fff mask
 * presumably bounds the value for the BPF verifier — confirm */
ip_offset = ((void *)ip - data) & 0x3fff;
len = parse_udphdr(&nh, data_end, &udp);
if (len < 0)
goto out;
/* only client->server DHCP traffic (UDP dport 67) is relayed */
if (udp->dest != bpf_htons(DEST_PORT))
goto out;
/* save the leading bytes: bpf_xdp_adjust_head() below invalidates the
 * packet data, so they must be replayed afterwards */
if (xdp_load_bytes(ctx, 0, buf, static_offset))
goto out;
/* save 4 further bytes per VLAN tag present */
for (i = 0; i < VLAN_MAX_DEPTH; i++) {
if (vlans.id[i]) {
if (xdp_load_bytes(ctx, offset, buf + offset, 4))
goto out;
offset += 4;
}
}
/* adjusting the packet head by delta size to insert option82 */
if (bpf_xdp_adjust_head(ctx, 0 - delta) < 0)
return XDP_ABORTED;
/* pointers must be re-read after the head adjustment */
data_end = (void *)(long)ctx->data_end;
data = (void *)(long)ctx->data;
if (data + offset > data_end)
return XDP_ABORTED;
/* replay the saved bytes into the (now larger) packet */
if (xdp_store_bytes(ctx, 0, buf, static_offset, 0))
return XDP_ABORTED;
if (offset > static_offset) {
offset = static_offset;
for (i = 0; i < VLAN_MAX_DEPTH; i++) {
if (vlans.id[i]) {
if (xdp_store_bytes(ctx, offset, buf + offset,
4, 0))
return XDP_ABORTED;
offset += 4;
}
}
}
/* append option 82 right after the replayed bytes */
if (write_dhcp_option(ctx, offset, &vlans))
return XDP_ABORTED;
ip = data + ip_offset;
if (ip + 1 > data_end)
return XDP_ABORTED;
/* overwrite the destination IP in IP header */
ip->daddr = *dhcp_srv;
//re-calc ip checksum
__u32 sum = calc_ip_csum(&oldip, ip, oldip.check);
ip->check = ~sum;
rc = XDP_PASS;
goto out;
out:
return rc;
}

198
dhcp-relay/dhcp_user_xdp.c Normal file
View File

@ -0,0 +1,198 @@
/* SPDX-License-Identifier: GPL-2.0 */
static const char *__doc__ = "DHCP relay program to add Option 82\n";
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <net/if.h>
#include <linux/if_link.h> /* depend on kernel-headers installed */
#include <arpa/inet.h>
#define SERVER_MAP "dhcp_server"
#define XDP_OBJ "dhcp_kern_xdp.o"
/* Long-option table for getopt_long(); also drives print_usage() output. */
static const struct option options[] = {
{ "help", no_argument, NULL, 'h' },
{ "interface", required_argument, NULL,
'i' }, // Name of interface to run on
{ "dhcp-server", required_argument, NULL, 'd' }, // Relay server IPv4 address
{ "mode", required_argument, NULL, 'm' }, // Attach mode: "skb" or "drv"
{ "unload", no_argument, NULL, 'u' }, // Detach the program instead of loading
{ 0, 0, NULL, 0 }
};
/* Print the usage/help text, derived from the long-option table above. */
static void print_usage(char *argv[])
{
	const struct option *opt;

	printf("Usage:\n");
	printf("%s\n", argv[0]);

	/* Walk the table until the all-zero terminator entry */
	for (opt = options; opt->name != 0; opt++) {
		printf(" --%-12s", opt->name);
		if (opt->flag != NULL)
			printf(" flag (internal value:%d)", *opt->flag);
		else
			printf(" short-option: -%c", opt->val);
		printf("\n");
	}

	printf("Example:\n");
	printf("To load program:\n %s -i eth0 -d 10.0.0.1\n", argv[0]);
	printf("To unload program:\n %s -i eth0 -u\n", argv[0]);
	printf("\n");
}
/* Detach whatever XDP program is attached to @ifindex using @xdp_flags.
 * Returns 0 on success, -1 on failure (error already printed). */
static int xdp_link_detach(int ifindex, __u32 xdp_flags)
{
	int err;

	/* fd of -1 asks the kernel to remove the current program */
	err = bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
	if (err < 0) {
		fprintf(stderr, "ERR: link set xdp unload failed (err=%d):%s\n",
			err, strerror(-err));
		return -1;
	}
	return 0;
}
/* Attach the XDP program @prog_fd to @ifindex with the given @xdp_flags.
 * Prints a hint for the common failure modes (program already attached,
 * driver lacks native XDP). Returns 0 on success, -1 on failure. */
int xdp_link_attach(int ifindex, __u32 xdp_flags, int prog_fd)
{
	/* libbpf provides the XDP net_device link-level hook attach helper */
	int err = bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags);

	if (err >= 0)
		return 0;

	fprintf(stderr,
		"ERR: "
		"ifindex(%d) link set xdp fd failed (%d): %s\n",
		ifindex, -err, strerror(-err));

	if (-err == EBUSY || -err == EEXIST)
		fprintf(stderr, "Hint: XDP already loaded on device\n");
	else if (-err == EOPNOTSUPP)
		fprintf(stderr, "Hint: Native-XDP not supported\n");

	return -1;
}
/* The user-space loader takes up to three arguments: the interface
 * name, the relay (DHCP server) IP, and an optional flag that unloads
 * the program instead of loading it.
 */
int main(int argc, char **argv)
{
char filename[256] = "dhcp_kern_xdp.o";
int prog_fd, err;
int opt;
__u32 xdp_flags = XDP_FLAGS_DRV_MODE;
char dev[IF_NAMESIZE] = "";
bool do_unload = 0;
struct bpf_map *map = NULL;
struct bpf_obj *obj = NULL;
int map_fd;
int key = 0;
char server[15] = "";
struct in_addr addr;
__u16 ifindex;
while ((opt = getopt_long(argc, argv, "hui:d:m:", options, NULL)) !=
-1) {
switch (opt) {
case 'i':
strncpy(dev, optarg, IF_NAMESIZE);
dev[IF_NAMESIZE - 1] = '\0';
ifindex = if_nametoindex(dev);
if (ifindex <= 0) {
printf("Couldn't find ifname:%s \n", dev);
return -EINVAL;
}
break;
case 'd':
if (inet_aton(optarg, &addr) == 0) {
fprintf(stderr,
"Couldn't validate IP address:%s\n",
optarg);
return -EINVAL;
}
break;
case 'm':
if (strcmp(optarg, "skb") == 0) {
xdp_flags = XDP_FLAGS_SKB_MODE;
} else if (strcmp(optarg, "drv") != 0) {
fprintf(stderr, "Invalid mode: %s\n", optarg);
return -EINVAL;
}
break;
case 'u':
do_unload = 1;
break;
case 'h':
print_usage(argv);
exit(0);
default:
fprintf(stderr, "Unknown option %s\n", argv[optind]);
return -EINVAL;
}
}
if (do_unload)
return xdp_link_detach(ifindex, xdp_flags);
/* Load the BPF-ELF object file and get back first BPF_prog FD */
err = bpf_prog_load(filename, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
if (err) {
fprintf(stderr, "ERR: loading BPF-OBJ file(%s) (%d): %s\n",
filename, err, strerror(-err));
return -1;
}
if (prog_fd <= 0) {
printf("ERR: loading file: %s\n");
return -1;
}
/* read the map from prog object file and update the real
* server IP to the map
*/
map = bpf_object__find_map_by_name(obj, SERVER_MAP);
err = libbpf_get_error(map);
if (err) {
fprintf(stderr, "Could not find map %s in %s: %s\n", SERVER_MAP,
XDP_OBJ, strerror(err));
map = NULL;
exit(-1);
}
map_fd = bpf_map__fd(map);
if (map_fd < 0) {
fprintf(stderr, "Could not get map fd\n");
exit(-1);
}
err = bpf_map_update_elem(map_fd, &key, &addr.s_addr, BPF_ANY);
if (err) {
fprintf(stderr, "Could not update map %s in %s\n", SERVER_MAP,
XDP_OBJ);
exit(-1);
}
err = xdp_link_attach(ifindex, xdp_flags, prog_fd);
if (err)
return err;
printf("Success: Loading xdp program\n");
return 0;
}

20
headers/README.md Normal file
View File

@ -0,0 +1,20 @@
# NOTICE
This directory contains include header files needed to compile BPF programs.
The files are either copied from the kernel source (in subdir [linux/](linux))
or "shadow" files that contain useful defines that are often used in kernel
headers.
For example [bpf/compiler.h](bpf/compiler.h) contains practical compile macros
like `READ_ONCE` and `WRITE_ONCE` with verifier workarounds via
`bpf_barrier()`. And the `likely()` + `unlikely()` annotations.
The include file [linux/bpf.h](linux/bpf.h) is the most central file that all
BPF (kernel-side) programs include. It is maintained in this directory,
because this project knows what BPF features it uses, which makes the update
cycle tied to the project itself. We prefer not to depend on the OS distro
kernel headers version of this file. (Hint: due to the use of `enum` instead of
`#define`, the usual C-preprocessor macro-detection trick will not work. This is
done on purpose, to discourage userspace from detecting features via header file
defines.)

View File

@ -0,0 +1,120 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_TRACE_HELPERS_H
#define __BPF_TRACE_HELPERS_H
#include <bpf/bpf_helpers.h>
#define ___bpf_concat(a, b) a ## b
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
#define ___bpf_narg(...) \
___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#define ___bpf_empty(...) \
___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
#define ___bpf_ctx_cast0() ctx
#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
#define ___bpf_ctx_cast(args...) \
___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
/*
* BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
* similar kinds of BPF programs, that accept input arguments as a single
* pointer to untyped u64 array, where each u64 can actually be a typed
* pointer or integer of different size. Instead of requring user to write
* manual casts and work with array elements by index, BPF_PROG macro
* allows user to declare a list of named and typed input arguments in the
* same syntax as for normal C function. All the casting is hidden and
* performed transparently, while user code can just assume working with
* function arguments of specified type and name.
*
* Original raw context argument is preserved as well as 'ctx' argument.
* This is useful when using BPF helpers that expect original context
* as one of the parameters (e.g., for bpf_perf_event_output()).
*/
#define BPF_PROG(name, args...) \
name(unsigned long long *ctx); \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx, ##args); \
typeof(name(0)) name(unsigned long long *ctx) \
{ \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
return ____##name(___bpf_ctx_cast(args)); \
_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx, ##args)
struct pt_regs;
#define ___bpf_kprobe_args0() ctx
#define ___bpf_kprobe_args1(x) \
___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
#define ___bpf_kprobe_args2(x, args...) \
___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
#define ___bpf_kprobe_args3(x, args...) \
___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
#define ___bpf_kprobe_args4(x, args...) \
___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
#define ___bpf_kprobe_args5(x, args...) \
___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
#define ___bpf_kprobe_args(args...) \
___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
/*
* BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
* tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific
* low-level way of getting kprobe input arguments from struct pt_regs, and
* provides a familiar typed and named function arguments syntax and
 * semantics of accessing kprobe input parameters.
*
* Original struct pt_regs* context is preserved as 'ctx' argument. This might
* be necessary when using BPF helpers like bpf_perf_event_output().
*/
#define BPF_KPROBE(name, args...) \
name(struct pt_regs *ctx); \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
return ____##name(___bpf_kprobe_args(args)); \
_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
#define ___bpf_kretprobe_args0() ctx
#define ___bpf_kretprobe_argsN(x, args...) \
___bpf_kprobe_args(args), (void *)PT_REGS_RET(ctx)
#define ___bpf_kretprobe_args(args...) \
___bpf_apply(___bpf_kretprobe_args, ___bpf_empty(args))(args)
/*
* BPF_KRETPROBE is similar to BPF_KPROBE, except, in addition to listing all
* input kprobe arguments, one last extra argument has to be specified, which
* captures kprobe return value.
*/
#define BPF_KRETPROBE(name, args...) \
name(struct pt_regs *ctx); \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args);\
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
return ____##name(___bpf_kretprobe_args(args)); \
_Pragma("GCC diagnostic pop") \
} \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
#endif

124
headers/bpf/compiler.h Normal file
View File

@ -0,0 +1,124 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2016-2020 Authors of Cilium */
#ifndef __BPF_COMPILER_H_
#define __BPF_COMPILER_H_
#ifndef __non_bpf_context
# include "stddef.h"
#endif
#ifndef __section
# define __section(X) __attribute__((section(X), used))
#endif
#ifndef __maybe_unused
# define __maybe_unused __attribute__((__unused__))
#endif
#ifndef offsetof
# define offsetof(T, M) __builtin_offsetof(T, M)
#endif
#ifndef field_sizeof
# define field_sizeof(T, M) sizeof((((T *)NULL)->M))
#endif
#ifndef __packed
# define __packed __attribute__((packed))
#endif
#ifndef __nobuiltin
# if __clang_major__ >= 10
# define __nobuiltin(X) __attribute__((no_builtin(X)))
# else
# define __nobuiltin(X)
# endif
#endif
#ifndef likely
# define likely(X) __builtin_expect(!!(X), 1)
#endif
#ifndef unlikely
# define unlikely(X) __builtin_expect(!!(X), 0)
#endif
#ifndef always_succeeds /* Mainly for documentation purpose. */
# define always_succeeds(X) likely(X)
#endif
#undef __always_inline /* stddef.h defines its own */
#define __always_inline inline __attribute__((always_inline))
#ifndef __stringify
# define __stringify(X) #X
#endif
#ifndef __fetch
# define __fetch(X) (__u32)(__u64)(&(X))
#endif
#ifndef __aligned
# define __aligned(X) __attribute__((aligned(X)))
#endif
#ifndef build_bug_on
# define build_bug_on(E) ((void)sizeof(char[1 - 2*!!(E)]))
#endif
#ifndef __throw_build_bug
# define __throw_build_bug() __builtin_trap()
#endif
#ifndef __printf
# define __printf(X, Y) __attribute__((__format__(printf, X, Y)))
#endif
#ifndef barrier
# define barrier() asm volatile("": : :"memory")
#endif
#ifndef barrier_data
# define barrier_data(ptr) asm volatile("": :"r"(ptr) :"memory")
#endif
/* Compiler-only barrier (emits no instructions — barrier() above is an
 * empty asm with a "memory" clobber); used to stop clang from folding
 * pointer arithmetic in ways the BPF verifier rejects. */
static __always_inline void bpf_barrier(void)
{
	/* Workaround to avoid verifier complaint:
	 * "dereference of modified ctx ptr R5 off=48+0, ctx+const is allowed,
	 * ctx+const+const is not"
	 */
	barrier();
}
#ifndef ARRAY_SIZE
# define ARRAY_SIZE(A) (sizeof(A) / sizeof((A)[0]))
#endif
#ifndef __READ_ONCE
# define __READ_ONCE(X) (*(volatile typeof(X) *)&X)
#endif
#ifndef __WRITE_ONCE
# define __WRITE_ONCE(X, V) (*(volatile typeof(X) *)&X) = (V)
#endif
/* {READ,WRITE}_ONCE() with verifier workaround via bpf_barrier(). */
#ifndef READ_ONCE
# define READ_ONCE(X) \
({ typeof(X) __val = __READ_ONCE(X); \
bpf_barrier(); \
__val; })
#endif
#ifndef WRITE_ONCE
# define WRITE_ONCE(X, V) \
({ typeof(X) __val = (V); \
__WRITE_ONCE(X, __val); \
bpf_barrier(); \
__val; })
#endif
#endif /* __BPF_COMPILER_H_ */

13
headers/linux/README.md Normal file
View File

@ -0,0 +1,13 @@
# NOTICE
This directory contains include header files **copied from the Linux
kernel tree**. Some of them have been modified to ease compiling BPF
programs.
Linux distributions usually have a 'kernel-headers' software package
that also contain these files. As BPF is in rapid development the
distro version of these header files might not be new enough to
contain the features needed by this git repo.
Thus, we maintain a copy of these header files to match the features
used by our software.

5154
headers/linux/bpf.h Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,57 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_BPF_COMMON_H__
#define __LINUX_BPF_COMMON_H__
/* Instruction classes: encoded in the low three bits of the opcode. */
#define BPF_CLASS(code) ((code) & 0x07)
#define BPF_LD 0x00 /* load into accumulator (A) */
#define BPF_LDX 0x01 /* load into index register (X) */
#define BPF_ST 0x02 /* store A to scratch memory */
#define BPF_STX 0x03 /* store X to scratch memory */
#define BPF_ALU 0x04 /* arithmetic/logic */
#define BPF_JMP 0x05 /* jumps */
#define BPF_RET 0x06 /* return (classic BPF only) */
#define BPF_MISC 0x07 /* miscellaneous */
/* ld/ldx fields */
#define BPF_SIZE(code) ((code) & 0x18)
#define BPF_W 0x00 /* 32-bit */
#define BPF_H 0x08 /* 16-bit */
#define BPF_B 0x10 /* 8-bit */
/* eBPF BPF_DW 0x18 64-bit */
#define BPF_MODE(code) ((code) & 0xe0)
#define BPF_IMM 0x00 /* immediate operand */
#define BPF_ABS 0x20 /* absolute packet offset */
#define BPF_IND 0x40 /* indirect (X-relative) packet offset */
#define BPF_MEM 0x60 /* scratch memory / register-indirect */
#define BPF_LEN 0x80 /* packet length (classic BPF) */
#define BPF_MSH 0xa0 /* IP-header-length helper (classic BPF) */
/* alu/jmp fields: operation is the high nibble of the opcode */
#define BPF_OP(code) ((code) & 0xf0)
#define BPF_ADD 0x00
#define BPF_SUB 0x10
#define BPF_MUL 0x20
#define BPF_DIV 0x30
#define BPF_OR 0x40
#define BPF_AND 0x50
#define BPF_LSH 0x60
#define BPF_RSH 0x70
#define BPF_NEG 0x80
#define BPF_MOD 0x90
#define BPF_XOR 0xa0
/* jump opcodes (BPF_OP values when the class is BPF_JMP) */
#define BPF_JA 0x00
#define BPF_JEQ 0x10
#define BPF_JGT 0x20
#define BPF_JGE 0x30
#define BPF_JSET 0x40
/* source operand selector: immediate (K) or register (X) */
#define BPF_SRC(code) ((code) & 0x08)
#define BPF_K 0x00
#define BPF_X 0x08
#ifndef BPF_MAXINSNS
#define BPF_MAXINSNS 4096
#endif
#endif /* __LINUX_BPF_COMMON_H__ */

172
headers/linux/btf.h Normal file
View File

@ -0,0 +1,172 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2018 Facebook */
#ifndef __LINUX_BTF_H__
#define __LINUX_BTF_H__
#include <linux/types.h>
#define BTF_MAGIC 0xeB9F
#define BTF_VERSION 1
struct btf_header {
__u16 magic;
__u8 version;
__u8 flags;
__u32 hdr_len;
/* All offsets are in bytes relative to the end of this header */
__u32 type_off; /* offset of type section */
__u32 type_len; /* length of type section */
__u32 str_off; /* offset of string section */
__u32 str_len; /* length of string section */
};
/* Max # of type identifier */
#define BTF_MAX_TYPE 0x000fffff
/* Max offset into the string section */
#define BTF_MAX_NAME_OFFSET 0x00ffffff
/* Max # of struct/union/enum members or func args */
#define BTF_MAX_VLEN 0xffff
struct btf_type {
__u32 name_off;
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
* bits 24-27: kind (e.g. int, ptr, array...etc)
* bits 28-30: unused
* bit 31: kind_flag, currently used by
* struct, union and fwd
*/
__u32 info;
/* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
* FUNC, FUNC_PROTO and VAR.
* "type" is a type_id referring to another type.
*/
union {
__u32 size;
__u32 type;
};
};
#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
#define BTF_INFO_KFLAG(info) ((info) >> 31)
#define BTF_KIND_UNKN 0 /* Unknown */
#define BTF_KIND_INT 1 /* Integer */
#define BTF_KIND_PTR 2 /* Pointer */
#define BTF_KIND_ARRAY 3 /* Array */
#define BTF_KIND_STRUCT 4 /* Struct */
#define BTF_KIND_UNION 5 /* Union */
#define BTF_KIND_ENUM 6 /* Enumeration */
#define BTF_KIND_FWD 7 /* Forward */
#define BTF_KIND_TYPEDEF 8 /* Typedef */
#define BTF_KIND_VOLATILE 9 /* Volatile */
#define BTF_KIND_CONST 10 /* Const */
#define BTF_KIND_RESTRICT 11 /* Restrict */
#define BTF_KIND_FUNC 12 /* Function */
#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
#define BTF_KIND_VAR 14 /* Variable */
#define BTF_KIND_DATASEC 15 /* Section */
#define BTF_KIND_MAX BTF_KIND_DATASEC
#define NR_BTF_KINDS (BTF_KIND_MAX + 1)
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
*/
/* BTF_KIND_INT is followed by a u32 and the following
* is the 32 bits arrangement:
*/
#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
#define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16)
#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
/* Attributes stored in the BTF_INT_ENCODING */
#define BTF_INT_SIGNED (1 << 0)
#define BTF_INT_CHAR (1 << 1)
#define BTF_INT_BOOL (1 << 2)
/* BTF_KIND_ENUM is followed by multiple "struct btf_enum".
* The exact number of btf_enum is stored in the vlen (of the
* info in "struct btf_type").
*/
struct btf_enum {
__u32 name_off;
__s32 val;
};
/* BTF_KIND_ARRAY is followed by one "struct btf_array" */
struct btf_array {
__u32 type;
__u32 index_type;
__u32 nelems;
};
/* BTF_KIND_STRUCT and BTF_KIND_UNION are followed
* by multiple "struct btf_member". The exact number
* of btf_member is stored in the vlen (of the info in
* "struct btf_type").
*/
struct btf_member {
__u32 name_off;
__u32 type;
/* If the type info kind_flag is set, the btf_member offset
* contains both member bitfield size and bit offset. The
* bitfield size is set for bitfield members. If the type
* info kind_flag is not set, the offset contains only bit
* offset.
*/
__u32 offset;
};
/* If the struct/union type info kind_flag is set, the
* following two macros are used to access bitfield_size
* and bit_offset from btf_member.offset.
*/
#define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24)
#define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff)
/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param".
* The exact number of btf_param is stored in the vlen (of the
* info in "struct btf_type").
*/
struct btf_param {
__u32 name_off;
__u32 type;
};
enum {
BTF_VAR_STATIC = 0,
BTF_VAR_GLOBAL_ALLOCATED = 1,
BTF_VAR_GLOBAL_EXTERN = 2,
};
enum btf_func_linkage {
BTF_FUNC_STATIC = 0,
BTF_FUNC_GLOBAL = 1,
BTF_FUNC_EXTERN = 2,
};
/* BTF_KIND_VAR is followed by a single "struct btf_var" to describe
* additional information related to the variable such as its linkage.
*/
struct btf_var {
__u32 linkage;
};
/* BTF_KIND_DATASEC is followed by multiple "struct btf_var_secinfo"
* to describe all BTF_KIND_VAR types it contains along with it's
* in-section offset as well as size.
*/
struct btf_var_secinfo {
__u32 type;
__u32 offset;
__u32 size;
};
#endif /* __LINUX_BTF_H__ */

34
headers/linux/err.h Normal file
View File

@ -0,0 +1,34 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __LINUX_ERR_H
#define __LINUX_ERR_H
#include <stdbool.h>
#include <linux/types.h>
#include <asm/errno.h>
#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)
static inline void * ERR_PTR(long error_)
{
return (void *) error_;
}
static inline long PTR_ERR(const void *ptr)
{
return (long) ptr;
}
static inline bool IS_ERR(const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
static inline bool IS_ERR_OR_NULL(const void *ptr)
{
return (!ptr) || IS_ERR_VALUE((unsigned long)ptr);
}
#endif

1249
headers/linux/if_link.h Normal file

File diff suppressed because it is too large Load Diff

111
headers/linux/if_xdp.h Normal file
View File

@ -0,0 +1,111 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* if_xdp: XDP socket user-space interface
* Copyright(c) 2018 Intel Corporation.
*
* Author(s): Björn Töpel <bjorn.topel@intel.com>
* Magnus Karlsson <magnus.karlsson@intel.com>
*/
#ifndef _LINUX_IF_XDP_H
#define _LINUX_IF_XDP_H
#include <linux/types.h>
/* Options for the sxdp_flags field */
#define XDP_SHARED_UMEM (1 << 0)
#define XDP_COPY (1 << 1) /* Force copy-mode */
#define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */
/* If this option is set, the driver might go sleep and in that case
* the XDP_RING_NEED_WAKEUP flag in the fill and/or Tx rings will be
* set. If it is set, the application need to explicitly wake up the
* driver with a poll() (Rx and Tx) or sendto() (Tx only). If you are
* running the driver and the application on the same core, you should
* use this option so that the kernel will yield to the user space
* application.
*/
#define XDP_USE_NEED_WAKEUP (1 << 3)
/* Flags for xsk_umem_config flags */
#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
struct sockaddr_xdp {
__u16 sxdp_family;
__u16 sxdp_flags;
__u32 sxdp_ifindex;
__u32 sxdp_queue_id;
__u32 sxdp_shared_umem_fd;
};
/* XDP_RING flags */
#define XDP_RING_NEED_WAKEUP (1 << 0)
struct xdp_ring_offset {
__u64 producer;
__u64 consumer;
__u64 desc;
__u64 flags;
};
struct xdp_mmap_offsets {
struct xdp_ring_offset rx;
struct xdp_ring_offset tx;
struct xdp_ring_offset fr; /* Fill */
struct xdp_ring_offset cr; /* Completion */
};
/* XDP socket options */
#define XDP_MMAP_OFFSETS 1
#define XDP_RX_RING 2
#define XDP_TX_RING 3
#define XDP_UMEM_REG 4
#define XDP_UMEM_FILL_RING 5
#define XDP_UMEM_COMPLETION_RING 6
#define XDP_STATISTICS 7
#define XDP_OPTIONS 8
struct xdp_umem_reg {
__u64 addr; /* Start of packet data area */
__u64 len; /* Length of packet data area */
__u32 chunk_size;
__u32 headroom;
__u32 flags;
};
struct xdp_statistics {
__u64 rx_dropped; /* Dropped for other reasons */
__u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
__u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
__u64 rx_ring_full; /* Dropped due to rx ring being full */
__u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
__u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
};
struct xdp_options {
__u32 flags;
};
/* Flags for the flags field of struct xdp_options */
#define XDP_OPTIONS_ZEROCOPY (1 << 0)
/* Pgoff for mmaping the rings */
#define XDP_PGOFF_RX_RING 0
#define XDP_PGOFF_TX_RING 0x80000000
#define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL
#define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL
/* Masks for unaligned chunks mode */
#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48
#define XSK_UNALIGNED_BUF_ADDR_MASK \
((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
/* Rx/Tx descriptor */
struct xdp_desc {
__u64 addr;
__u32 len;
__u32 options;
};
/* UMEM descriptor is __u64 */
#endif /* _LINUX_IF_XDP_H */

355
headers/linux/netlink.h Normal file
View File

@ -0,0 +1,355 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_NETLINK_H
#define __LINUX_NETLINK_H
#include <linux/const.h>
#include <linux/socket.h> /* for __kernel_sa_family_t */
#include <linux/types.h>
#define NETLINK_ROUTE 0 /* Routing/device hook */
#define NETLINK_UNUSED 1 /* Unused number */
#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
#define NETLINK_FIREWALL 3 /* Unused number, formerly ip_queue */
#define NETLINK_SOCK_DIAG 4 /* socket monitoring */
#define NETLINK_NFLOG 5 /* netfilter/iptables ULOG */
#define NETLINK_XFRM 6 /* ipsec */
#define NETLINK_SELINUX 7 /* SELinux event notifications */
#define NETLINK_ISCSI 8 /* Open-iSCSI */
#define NETLINK_AUDIT 9 /* auditing */
#define NETLINK_FIB_LOOKUP 10
#define NETLINK_CONNECTOR 11
#define NETLINK_NETFILTER 12 /* netfilter subsystem */
#define NETLINK_IP6_FW 13
#define NETLINK_DNRTMSG 14 /* DECnet routing messages */
#define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */
#define NETLINK_GENERIC 16
/* leave room for NETLINK_DM (DM Events) */
#define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */
#define NETLINK_ECRYPTFS 19
#define NETLINK_RDMA 20
#define NETLINK_CRYPTO 21 /* Crypto layer */
#define NETLINK_SMC 22 /* SMC monitoring */
#define NETLINK_INET_DIAG NETLINK_SOCK_DIAG
#define MAX_LINKS 32
struct sockaddr_nl {
__kernel_sa_family_t nl_family; /* AF_NETLINK */
unsigned short nl_pad; /* zero */
__u32 nl_pid; /* port ID */
__u32 nl_groups; /* multicast groups mask */
};
struct nlmsghdr {
__u32 nlmsg_len; /* Length of message including header */
__u16 nlmsg_type; /* Message content */
__u16 nlmsg_flags; /* Additional flags */
__u32 nlmsg_seq; /* Sequence number */
__u32 nlmsg_pid; /* Sending process port ID */
};
/* Flags values */
#define NLM_F_REQUEST 0x01 /* It is request message. */
#define NLM_F_MULTI 0x02 /* Multipart message, terminated by NLMSG_DONE */
#define NLM_F_ACK 0x04 /* Reply with ack, with zero or error code */
#define NLM_F_ECHO 0x08 /* Echo this request */
#define NLM_F_DUMP_INTR 0x10 /* Dump was inconsistent due to sequence change */
#define NLM_F_DUMP_FILTERED 0x20 /* Dump was filtered as requested */
/* Modifiers to GET request */
#define NLM_F_ROOT 0x100 /* specify tree root */
#define NLM_F_MATCH 0x200 /* return all matching */
#define NLM_F_ATOMIC 0x400 /* atomic GET */
#define NLM_F_DUMP (NLM_F_ROOT|NLM_F_MATCH)
/* Modifiers to NEW request */
#define NLM_F_REPLACE 0x100 /* Override existing */
#define NLM_F_EXCL 0x200 /* Do not touch, if it exists */
#define NLM_F_CREATE 0x400 /* Create, if it does not exist */
#define NLM_F_APPEND 0x800 /* Add to end of list */
/* Modifiers to DELETE request */
#define NLM_F_NONREC 0x100 /* Do not delete recursively */
/* Flags for ACK message */
#define NLM_F_CAPPED 0x100 /* request was capped */
#define NLM_F_ACK_TLVS 0x200 /* extended ACK TVLs were included */
/*
4.4BSD ADD NLM_F_CREATE|NLM_F_EXCL
4.4BSD CHANGE NLM_F_REPLACE
True CHANGE NLM_F_CREATE|NLM_F_REPLACE
Append NLM_F_CREATE
Check NLM_F_EXCL
*/
#define NLMSG_ALIGNTO 4U
#define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
#define NLMSG_LENGTH(len) ((len) + NLMSG_HDRLEN)
#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len))
#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
#define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \
(struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len)))
#define NLMSG_OK(nlh,len) ((len) >= (int)sizeof(struct nlmsghdr) && \
(nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \
(nlh)->nlmsg_len <= (len))
#define NLMSG_PAYLOAD(nlh,len) ((nlh)->nlmsg_len - NLMSG_SPACE((len)))
#define NLMSG_NOOP 0x1 /* Nothing. */
#define NLMSG_ERROR 0x2 /* Error */
#define NLMSG_DONE 0x3 /* End of a dump */
#define NLMSG_OVERRUN 0x4 /* Data lost */
#define NLMSG_MIN_TYPE 0x10 /* < 0x10: reserved control messages */
struct nlmsgerr {
int error;
struct nlmsghdr msg;
/*
* followed by the message contents unless NETLINK_CAP_ACK was set
* or the ACK indicates success (error == 0)
* message length is aligned with NLMSG_ALIGN()
*/
/*
* followed by TLVs defined in enum nlmsgerr_attrs
* if NETLINK_EXT_ACK was set
*/
};
/**
* enum nlmsgerr_attrs - nlmsgerr attributes
* @NLMSGERR_ATTR_UNUSED: unused
* @NLMSGERR_ATTR_MSG: error message string (string)
* @NLMSGERR_ATTR_OFFS: offset of the invalid attribute in the original
* message, counting from the beginning of the header (u32)
* @NLMSGERR_ATTR_COOKIE: arbitrary subsystem specific cookie to
* be used - in the success case - to identify a created
* object or operation or similar (binary)
* @NLMSGERR_ATTR_POLICY: policy for a rejected attribute
* @__NLMSGERR_ATTR_MAX: number of attributes
* @NLMSGERR_ATTR_MAX: highest attribute number
*/
enum nlmsgerr_attrs {
NLMSGERR_ATTR_UNUSED,
NLMSGERR_ATTR_MSG,
NLMSGERR_ATTR_OFFS,
NLMSGERR_ATTR_COOKIE,
NLMSGERR_ATTR_POLICY,
__NLMSGERR_ATTR_MAX,
NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1
};
#define NETLINK_ADD_MEMBERSHIP 1
#define NETLINK_DROP_MEMBERSHIP 2
#define NETLINK_PKTINFO 3
#define NETLINK_BROADCAST_ERROR 4
#define NETLINK_NO_ENOBUFS 5
#define NETLINK_RX_RING 6
#define NETLINK_TX_RING 7
#define NETLINK_LISTEN_ALL_NSID 8
#define NETLINK_LIST_MEMBERSHIPS 9
#define NETLINK_CAP_ACK 10
#define NETLINK_EXT_ACK 11
#define NETLINK_GET_STRICT_CHK 12
struct nl_pktinfo {
__u32 group;
};
struct nl_mmap_req {
unsigned int nm_block_size;
unsigned int nm_block_nr;
unsigned int nm_frame_size;
unsigned int nm_frame_nr;
};
struct nl_mmap_hdr {
unsigned int nm_status;
unsigned int nm_len;
__u32 nm_group;
/* credentials */
__u32 nm_pid;
__u32 nm_uid;
__u32 nm_gid;
};
enum nl_mmap_status {
NL_MMAP_STATUS_UNUSED,
NL_MMAP_STATUS_RESERVED,
NL_MMAP_STATUS_VALID,
NL_MMAP_STATUS_COPY,
NL_MMAP_STATUS_SKIP,
};
#define NL_MMAP_MSG_ALIGNMENT NLMSG_ALIGNTO
#define NL_MMAP_MSG_ALIGN(sz) __ALIGN_KERNEL(sz, NL_MMAP_MSG_ALIGNMENT)
#define NL_MMAP_HDRLEN NL_MMAP_MSG_ALIGN(sizeof(struct nl_mmap_hdr))
#define NET_MAJOR 36 /* Major 36 is reserved for networking */
enum {
NETLINK_UNCONNECTED = 0,
NETLINK_CONNECTED,
};
/*
* <------- NLA_HDRLEN ------> <-- NLA_ALIGN(payload)-->
* +---------------------+- - -+- - - - - - - - - -+- - -+
* | Header | Pad | Payload | Pad |
* | (struct nlattr) | ing | | ing |
* +---------------------+- - -+- - - - - - - - - -+- - -+
* <-------------- nlattr->nla_len -------------->
*/
struct nlattr {
__u16 nla_len;
__u16 nla_type;
};
/*
* nla_type (16 bits)
* +---+---+-------------------------------+
* | N | O | Attribute Type |
* +---+---+-------------------------------+
* N := Carries nested attributes
* O := Payload stored in network byte order
*
* Note: The N and O flag are mutually exclusive.
*/
#define NLA_F_NESTED (1 << 15)
#define NLA_F_NET_BYTEORDER (1 << 14)
#define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER)
#define NLA_ALIGNTO 4
#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr)))
/* Generic 32 bitflags attribute content sent to the kernel.
*
* The value is a bitmap that defines the values being set
* The selector is a bitmask that defines which value is legit
*
* Examples:
* value = 0x0, and selector = 0x1
* implies we are selecting bit 1 and we want to set its value to 0.
*
* value = 0x2, and selector = 0x2
* implies we are selecting bit 2 and we want to set its value to 1.
*
*/
struct nla_bitfield32 {
__u32 value;
__u32 selector;
};
/*
* policy descriptions - it's specific to each family how this is used
* Normally, it should be retrieved via a dump inside another attribute
* specifying where it applies.
*/
/**
* enum netlink_attribute_type - type of an attribute
* @NL_ATTR_TYPE_INVALID: unused
* @NL_ATTR_TYPE_FLAG: flag attribute (present/not present)
* @NL_ATTR_TYPE_U8: 8-bit unsigned attribute
* @NL_ATTR_TYPE_U16: 16-bit unsigned attribute
* @NL_ATTR_TYPE_U32: 32-bit unsigned attribute
* @NL_ATTR_TYPE_U64: 64-bit unsigned attribute
* @NL_ATTR_TYPE_S8: 8-bit signed attribute
* @NL_ATTR_TYPE_S16: 16-bit signed attribute
* @NL_ATTR_TYPE_S32: 32-bit signed attribute
* @NL_ATTR_TYPE_S64: 64-bit signed attribute
* @NL_ATTR_TYPE_BINARY: binary data, min/max length may be specified
* @NL_ATTR_TYPE_STRING: string, min/max length may be specified
* @NL_ATTR_TYPE_NUL_STRING: NUL-terminated string,
* min/max length may be specified
* @NL_ATTR_TYPE_NESTED: nested, i.e. the content of this attribute
* consists of sub-attributes. The nested policy and maxtype
* inside may be specified.
* @NL_ATTR_TYPE_NESTED_ARRAY: nested array, i.e. the content of this
* attribute contains sub-attributes whose type is irrelevant
* (just used to separate the array entries) and each such array
* entry has attributes again, the policy for those inner ones
* and the corresponding maxtype may be specified.
* @NL_ATTR_TYPE_BITFIELD32: &struct nla_bitfield32 attribute
*/
enum netlink_attribute_type {
NL_ATTR_TYPE_INVALID,
NL_ATTR_TYPE_FLAG,
NL_ATTR_TYPE_U8,
NL_ATTR_TYPE_U16,
NL_ATTR_TYPE_U32,
NL_ATTR_TYPE_U64,
NL_ATTR_TYPE_S8,
NL_ATTR_TYPE_S16,
NL_ATTR_TYPE_S32,
NL_ATTR_TYPE_S64,
NL_ATTR_TYPE_BINARY,
NL_ATTR_TYPE_STRING,
NL_ATTR_TYPE_NUL_STRING,
NL_ATTR_TYPE_NESTED,
NL_ATTR_TYPE_NESTED_ARRAY,
NL_ATTR_TYPE_BITFIELD32,
};
/**
* enum netlink_policy_type_attr - policy type attributes
* @NL_POLICY_TYPE_ATTR_UNSPEC: unused
* @NL_POLICY_TYPE_ATTR_TYPE: type of the attribute,
* &enum netlink_attribute_type (U32)
* @NL_POLICY_TYPE_ATTR_MIN_VALUE_S: minimum value for signed
* integers (S64)
* @NL_POLICY_TYPE_ATTR_MAX_VALUE_S: maximum value for signed
* integers (S64)
* @NL_POLICY_TYPE_ATTR_MIN_VALUE_U: minimum value for unsigned
* integers (U64)
* @NL_POLICY_TYPE_ATTR_MAX_VALUE_U: maximum value for unsigned
* integers (U64)
* @NL_POLICY_TYPE_ATTR_MIN_LENGTH: minimum length for binary
* attributes, no minimum if not given (U32)
* @NL_POLICY_TYPE_ATTR_MAX_LENGTH: maximum length for binary
* attributes, no maximum if not given (U32)
* @NL_POLICY_TYPE_ATTR_POLICY_IDX: sub policy for nested and
* nested array types (U32)
* @NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE: maximum sub policy
* attribute for nested and nested array types, this can
* in theory be < the size of the policy pointed to by
* the index, if limited inside the nesting (U32)
* @NL_POLICY_TYPE_ATTR_BITFIELD32_MASK: valid mask for the
* bitfield32 type (U32)
* @NL_POLICY_TYPE_ATTR_MASK: mask of valid bits for unsigned integers (U64)
* @NL_POLICY_TYPE_ATTR_PAD: pad attribute for 64-bit alignment
*/
/* Mirrors the kernel's uapi <linux/netlink.h>.  Enumerator order is kernel
 * ABI: note that PAD precedes MASK here, the opposite of the order used in
 * the kernel-doc listing above — do not "fix" this.
 */
enum netlink_policy_type_attr {
	NL_POLICY_TYPE_ATTR_UNSPEC,
	NL_POLICY_TYPE_ATTR_TYPE,
	NL_POLICY_TYPE_ATTR_MIN_VALUE_S,
	NL_POLICY_TYPE_ATTR_MAX_VALUE_S,
	NL_POLICY_TYPE_ATTR_MIN_VALUE_U,
	NL_POLICY_TYPE_ATTR_MAX_VALUE_U,
	NL_POLICY_TYPE_ATTR_MIN_LENGTH,
	NL_POLICY_TYPE_ATTR_MAX_LENGTH,
	NL_POLICY_TYPE_ATTR_POLICY_IDX,
	NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE,
	NL_POLICY_TYPE_ATTR_BITFIELD32_MASK,
	NL_POLICY_TYPE_ATTR_PAD,
	NL_POLICY_TYPE_ATTR_MASK,

	/* keep last */
	__NL_POLICY_TYPE_ATTR_MAX,
	NL_POLICY_TYPE_ATTR_MAX = __NL_POLICY_TYPE_ATTR_MAX - 1
};
#endif /* __LINUX_NETLINK_H */

77
headers/linux/perf-sys.h Normal file
View File

@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copied from $(LINUX)/tools/perf/perf-sys.h (kernel 4.18) */
#ifndef _PERF_SYS_H
#define _PERF_SYS_H
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/types.h>
#include <linux/perf_event.h>
/*
 * remove the following headers to allow for userspace program compilation
 * #include <linux/compiler.h>
 * #include <asm/barrier.h>
 */

/* Per-architecture /proc/cpuinfo key(s) that identify the CPU model; the
 * catch-all at the bottom covers x86 and any architecture not listed. */
#ifdef __powerpc__
#define CPUINFO_PROC	{"cpu"}
#endif

#ifdef __s390__
#define CPUINFO_PROC	{"vendor_id"}
#endif

#ifdef __sh__
#define CPUINFO_PROC	{"cpu type"}
#endif

#ifdef __hppa__
#define CPUINFO_PROC	{"cpu"}
#endif

#ifdef __sparc__
#define CPUINFO_PROC	{"cpu"}
#endif

#ifdef __alpha__
#define CPUINFO_PROC	{"cpu model"}
#endif

#ifdef __arm__
#define CPUINFO_PROC	{"model name", "Processor"}
#endif

#ifdef __mips__
#define CPUINFO_PROC	{"cpu model"}
#endif

#ifdef __arc__
#define CPUINFO_PROC	{"Processor"}
#endif

#ifdef __xtensa__
#define CPUINFO_PROC	{"core ID"}
#endif

#ifndef CPUINFO_PROC
#define CPUINFO_PROC	{ "model name", }
#endif
/* Wrapper for the perf_event_open(2) system call.  glibc exports no symbol
 * for it, so we go through syscall(2) directly.  Returns the new perf
 * event file descriptor, or -1 with errno set on failure.
 */
static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	int fd = syscall(__NR_perf_event_open, attr, pid, cpu, group_fd,
			 flags);

#ifdef HAVE_ATTR_TEST
	/* Hook for perf's test_attr__* instrumentation; compiled out
	 * unless HAVE_ATTR_TEST is defined. */
	if (unlikely(test_attr__enabled))
		test_attr__open(attr, pid, cpu, fd, group_fd, flags);
#endif
	return fd;
}
#endif /* _PERF_SYS_H */

View File

@ -0,0 +1,38 @@
#ifndef __VMLINUX_ARCH_ARM64_H__
#define __VMLINUX_ARCH_ARM64_H__
/* Warn if <bpf/bpf_tracing.h> was included without the matching target
 * architecture macro being defined. */
#ifdef __BPF_TRACING_H__
/* Expected include <bpf/bpf_tracing.h> */
#ifndef bpf_target_defined
#warning "Tracing need __TARGET_ARCH_arm64 defined"
#endif
#endif /* __BPF_TRACING_H__ */

/* arm64 register-dump layouts, reduced from the kernel headers so BPF
 * tracing programs can build without a generated vmlinux.h.  Field order
 * and widths must match the kernel's layout. */
struct user_pt_regs {
	__u64 regs[31];		/* general-purpose registers (presumably x0-x30 — TODO confirm) */
	__u64 sp;
	__u64 pc;
	__u64 pstate;
};

struct pt_regs {
	union {
		struct user_pt_regs user_regs;
		/* anonymous mirror of user_regs for direct member access */
		struct {
			u64 regs[31];
			u64 sp;
			u64 pc;
			u64 pstate;
		};
	};
	u64 orig_x0;
	s32 syscallno;
	u32 unused2;
	u64 orig_addr_limit;
	u64 pmr_save;
	u64 stackframe[2];
	u64 lockdep_hardirqs;
	u64 exit_rcu;
};
#endif /* __VMLINUX_ARCH_ARM64_H__ */

View File

@ -0,0 +1,55 @@
#ifndef __VMLINUX_ARCH_POWERPC_H__
#define __VMLINUX_ARCH_POWERPC_H__
/* Warn if <bpf/bpf_tracing.h> was included without the matching target
 * architecture macro being defined. */
#ifdef __BPF_TRACING_H__
/* Expected include <bpf/bpf_tracing.h> */
#ifndef bpf_target_defined
#warning "Tracing need __TARGET_ARCH_powerpc defined"
#endif
#endif /* __BPF_TRACING_H__ */

/* powerpc register-dump layouts, reduced from the kernel headers so BPF
 * tracing programs can build without a generated vmlinux.h.  Field order
 * and widths must match the kernel's layout. */
struct user_pt_regs {
	long unsigned int gpr[32];
	long unsigned int nip;
	long unsigned int msr;
	long unsigned int orig_gpr3;
	long unsigned int ctr;
	long unsigned int link;
	long unsigned int xer;
	long unsigned int ccr;
	long unsigned int softe;
	long unsigned int trap;
	long unsigned int dar;
	long unsigned int dsisr;
	long unsigned int result;
};

struct pt_regs {
	union {
		struct user_pt_regs user_regs;
		/* anonymous mirror of user_regs for direct member access */
		struct {
			long unsigned int gpr[32];
			long unsigned int nip;
			long unsigned int msr;
			long unsigned int orig_gpr3;
			long unsigned int ctr;
			long unsigned int link;
			long unsigned int xer;
			long unsigned int ccr;
			long unsigned int softe;
			long unsigned int trap;
			long unsigned int dar;
			long unsigned int dsisr;
			long unsigned int result;
		};
	};
	union {
		struct {
			long unsigned int ppr;
			long unsigned int kuap;
		};
		long unsigned int __pad[2];
	};
};
#endif /* __VMLINUX_ARCH_POWERPC_H__ */

View File

@ -0,0 +1,35 @@
#ifndef __VMLINUX_ARCH_X86_H__
#define __VMLINUX_ARCH_X86_H__
/* Warn if <bpf/bpf_tracing.h> was included without the matching target
 * architecture macro being defined. */
#ifdef __BPF_TRACING_H__
/* Expected include <bpf/bpf_tracing.h> */
#ifndef bpf_target_defined
#warning "Tracing need __TARGET_ARCH_x86 defined"
#endif
#endif /* __BPF_TRACING_H__ */

/* x86-64 register-dump layout, reduced from the kernel headers so BPF
 * tracing programs can build without a generated vmlinux.h.  Field order
 * and widths must match the kernel's layout. */
struct pt_regs {
	long unsigned int r15;
	long unsigned int r14;
	long unsigned int r13;
	long unsigned int r12;
	long unsigned int bp;
	long unsigned int bx;
	long unsigned int r11;
	long unsigned int r10;
	long unsigned int r9;
	long unsigned int r8;
	long unsigned int ax;
	long unsigned int cx;
	long unsigned int dx;
	long unsigned int si;
	long unsigned int di;
	long unsigned int orig_ax;
	long unsigned int ip;
	long unsigned int cs;
	long unsigned int flags;
	long unsigned int sp;
	long unsigned int ss;
};
#endif /* __VMLINUX_ARCH_X86_H__ */

View File

@ -0,0 +1,26 @@
#ifndef __VMLINUX_COMMON_H__
#define __VMLINUX_COMMON_H__
/* Minimal container types (doubly-linked list, rbtree node, refcount)
 * matching the kernel's layouts, shared by the reduced vmlinux headers. */
struct list_head {
	struct list_head *next;
	struct list_head *prev;
};

struct rb_node {
	/* name suggests parent pointer and colour bit packed together,
	 * as in the kernel's rbtree — layout only, never dereferenced here */
	long unsigned int __rb_parent_color;
	struct rb_node *rb_right;
	struct rb_node *rb_left;
};

typedef struct {
	int counter;
} atomic_t;

struct refcount_struct {
	atomic_t refs;
};

typedef struct refcount_struct refcount_t;
#endif /* __VMLINUX_COMMON_H__ */

View File

@ -0,0 +1,138 @@
#ifndef __VMLINUX_NET_H__
#define __VMLINUX_NET_H__
typedef __u32 __wsum;

/* NOTE(review): hard-coded to the 64-bit (offset-based) layout; the
 * disabled block below shows why BITS_PER_LONG cannot be trusted when
 * compiling with -target bpf. */
typedef unsigned int sk_buff_data_t; // Assumes 64-bit. FIXME see below

/*
// BITS_PER_LONG can be wrong with -target bpf
#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
*/

/* struct sk_buff, reduced from the kernel's <linux/skbuff.h>.  Only read
 * from BPF programs; vmlinux_local.h wraps these definitions in
 * preserve_access_index, so member offsets are relocated against the
 * running kernel's BTF rather than taken literally from this layout. */
struct sk_buff {
	union {
		struct {
			struct sk_buff *next;
			struct sk_buff *prev;
			union {
				struct net_device *dev;
				long unsigned int dev_scratch;
			};
		};
		struct rb_node rbnode;
		struct list_head list;
	};
	union {
		struct sock *sk;
		int ip_defrag_offset;
	};
	union {
		ktime_t tstamp;
		u64 skb_mstamp_ns;
	};
	/* per-layer private scratch space */
	char cb[48];
	union {
		struct {
			long unsigned int _skb_refdst;
			void (*destructor)(struct sk_buff *);
		};
		struct list_head tcp_tsorted_anchor;
	};
	long unsigned int _nfct;
	unsigned int len;
	unsigned int data_len;
	__u16 mac_len;
	__u16 hdr_len;
	__u16 queue_mapping;
	/* zero-length markers below (headers_start/headers_end etc.) exist
	 * only to name offsets within the struct */
	__u8 __cloned_offset[0];
	__u8 cloned: 1;
	__u8 nohdr: 1;
	__u8 fclone: 2;
	__u8 peeked: 1;
	__u8 head_frag: 1;
	__u8 pfmemalloc: 1;
	__u8 active_extensions;
	__u32 headers_start[0];
	__u8 __pkt_type_offset[0];
	__u8 pkt_type: 3;
	__u8 ignore_df: 1;
	__u8 nf_trace: 1;
	__u8 ip_summed: 2;
	__u8 ooo_okay: 1;
	__u8 l4_hash: 1;
	__u8 sw_hash: 1;
	__u8 wifi_acked_valid: 1;
	__u8 wifi_acked: 1;
	__u8 no_fcs: 1;
	__u8 encapsulation: 1;
	__u8 encap_hdr_csum: 1;
	__u8 csum_valid: 1;
	__u8 __pkt_vlan_present_offset[0];
	__u8 vlan_present: 1;
	__u8 csum_complete_sw: 1;
	__u8 csum_level: 2;
	__u8 csum_not_inet: 1;
	__u8 dst_pending_confirm: 1;
	__u8 ndisc_nodetype: 2;
	__u8 ipvs_property: 1;
	__u8 inner_protocol_type: 1;
	__u8 remcsum_offload: 1;
	__u8 offload_fwd_mark: 1;
	__u8 offload_l3_fwd_mark: 1;
	__u8 tc_skip_classify: 1;
	__u8 tc_at_ingress: 1;
	__u8 redirected: 1;
	__u8 from_ingress: 1;
	__u8 decrypted: 1;
	__u16 tc_index;
	union {
		__wsum csum;
		struct {
			__u16 csum_start;
			__u16 csum_offset;
		};
	};
	__u32 priority;
	int skb_iif;
	__u32 hash;
	__be16 vlan_proto;
	__u16 vlan_tci;
	union {
		unsigned int napi_id;
		unsigned int sender_cpu;
	};
	__u32 secmark;
	union {
		__u32 mark;
		__u32 reserved_tailroom;
	};
	union {
		__be16 inner_protocol;
		__u8 inner_ipproto;
	};
	__u16 inner_transport_header;
	__u16 inner_network_header;
	__u16 inner_mac_header;
	__be16 protocol;
	__u16 transport_header;
	__u16 network_header;
	__u16 mac_header;
	__u32 headers_end[0];
	sk_buff_data_t tail;
	sk_buff_data_t end;
	unsigned char *head;
	unsigned char *data;
	unsigned int truesize;
	refcount_t users;
	struct skb_ext *extensions;
};
#endif /* __VMLINUX_NET_H__ */

View File

@ -0,0 +1,14 @@
#ifndef __VMLINUX_TYPES_H__
#define __VMLINUX_TYPES_H__
/* Kernel-style short integer typedefs, mapped onto the uapi __uNN/__sNN
 * types from <linux/types.h>. */
typedef __u8 u8;
typedef __s16 s16;
typedef __u16 u16;
typedef __s32 s32;
typedef __u32 u32;
typedef __s64 s64;
typedef __u64 u64;
typedef s64 ktime_t;	/* kernel time value */
#endif /* __VMLINUX_TYPES_H__ */

28
headers/vmlinux_local.h Normal file
View File

@ -0,0 +1,28 @@
/*
 * WARNING: This file shadows the vmlinux.h that you can generate yourself.
 *
 * Cmdline to generate vmlinux.h:
 *   bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h
 *
 * This vmlinux.h shadow contains kernel headers reduced to the definitions
 * that were needed in this project.
 */
#ifndef __VMLINUX_H__
#define __VMLINUX_H__

#include <linux/types.h> /* Needed for __uNN in vmlinux/vmlinux_types.h */

/* Tag every struct member access for CO-RE relocation (like a generated
 * vmlinux.h does), so field offsets are fixed up against the running
 * kernel's BTF at load time rather than taken from these reduced layouts. */
#ifndef BPF_NO_PRESERVE_ACCESS_INDEX
#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)
#endif

#include "vmlinux/vmlinux_types.h"
#include "vmlinux/vmlinux_common.h"
#include "vmlinux/vmlinux_arch.h"
#include "vmlinux/vmlinux_net.h"

#ifndef BPF_NO_PRESERVE_ACCESS_INDEX
#pragma clang attribute pop
#endif

#endif /* __VMLINUX_H__ */

561
include/bpf/builtins.h Normal file
View File

@ -0,0 +1,561 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2016-2020 Authors of Cilium */
#ifndef __BPF_BUILTINS__
#define __BPF_BUILTINS__
#include "compiler.h"
#ifndef __non_bpf_context
/* Atomic add on *P; the (void) cast discards the fetched old value. */
#ifndef lock_xadd
# define lock_xadd(P, V)	((void) __sync_fetch_and_add((P), (V)))
#endif

/* Unfortunately verifier forces aligned stack access while other memory
 * do not have to be aligned (map, pkt, etc). Mark those on the /stack/
 * for objects > 8 bytes in order to force-align such memcpy candidates
 * when we really need them to be aligned, this is not needed for objects
 * of size <= 8 bytes and in case of > 8 bytes /only/ when 8 byte is not
 * the natural object alignment (e.g. __u8 foo[12]).
 */
#define __align_stack_8		__aligned(8)

/* Memory iterators used below: step a pointer backward/forward by the
 * width of __u{op}. */
#define __it_bwd(x, op)	(x -= sizeof(__u##op))
#define __it_fwd(x, op)	(x += sizeof(__u##op))

/* Memory operators used below: backward zero-store (__bpf_memzero),
 * backward xor-accumulate (__bpf_memcmp), backward move (__bpf_memcpy)
 * and forward move (__bpf_memmove_fwd). */
#define __it_set(a, op)		(*(__u##op *)__it_bwd(a, op)) = 0
#define __it_xor(a, b, r, op)	r |= (*(__u##op *)__it_bwd(a, op)) ^ (*(__u##op *)__it_bwd(b, op))
#define __it_mob(a, b, op)	(*(__u##op *)__it_bwd(a, op)) = (*(__u##op *)__it_bwd(b, op))
#define __it_mof(a, b, op)				\
	do {						\
		*(__u##op *)a = *(__u##op *)b;		\
		__it_fwd(a, op); __it_fwd(b, op);	\
	} while (0)
/* Fallback for the cases the unrolled __bpf_memzero() does not cover. */
static __always_inline __maybe_unused void
__bpf_memset_builtin(void *d, __u8 c, __u64 len)
{
	/* Everything non-zero or non-const (currently unsupported) as c
	 * gets handled here.
	 */
	__builtin_memset(d, c, len);
}
/* Zero out a compile-time-constant number of bytes (up to 96) as an
 * unrolled sequence of fixed-width stores, walking backwards from d + len.
 * The fall-through/goto ladder lets every even length share the store tail
 * of the next-smaller multiple of 8; odd lengths > 1 and non-constant or
 * oversized lengths are rejected at build time via __throw_build_bug().
 */
static __always_inline void __bpf_memzero(void *d, __u64 len)
{
#if __clang_major__ >= 10
	if (!__builtin_constant_p(len))
		__throw_build_bug();

	d += len;	/* backward iterators pre-decrement from the end */

	switch (len) {
	case 96: __it_set(d, 64);
	case 88: jmp_88: __it_set(d, 64);
	case 80: jmp_80: __it_set(d, 64);
	case 72: jmp_72: __it_set(d, 64);
	case 64: jmp_64: __it_set(d, 64);
	case 56: jmp_56: __it_set(d, 64);
	case 48: jmp_48: __it_set(d, 64);
	case 40: jmp_40: __it_set(d, 64);
	case 32: jmp_32: __it_set(d, 64);
	case 24: jmp_24: __it_set(d, 64);
	case 16: jmp_16: __it_set(d, 64);
	case  8: jmp_8: __it_set(d, 64);
		break;

	case 94: __it_set(d, 16); __it_set(d, 32); goto jmp_88;
	case 86: __it_set(d, 16); __it_set(d, 32); goto jmp_80;
	case 78: __it_set(d, 16); __it_set(d, 32); goto jmp_72;
	case 70: __it_set(d, 16); __it_set(d, 32); goto jmp_64;
	case 62: __it_set(d, 16); __it_set(d, 32); goto jmp_56;
	case 54: __it_set(d, 16); __it_set(d, 32); goto jmp_48;
	case 46: __it_set(d, 16); __it_set(d, 32); goto jmp_40;
	case 38: __it_set(d, 16); __it_set(d, 32); goto jmp_32;
	case 30: __it_set(d, 16); __it_set(d, 32); goto jmp_24;
	case 22: __it_set(d, 16); __it_set(d, 32); goto jmp_16;
	case 14: __it_set(d, 16); __it_set(d, 32); goto jmp_8;
	case  6: __it_set(d, 16); __it_set(d, 32);
		break;

	case 92: __it_set(d, 32); goto jmp_88;
	case 84: __it_set(d, 32); goto jmp_80;
	case 76: __it_set(d, 32); goto jmp_72;
	case 68: __it_set(d, 32); goto jmp_64;
	case 60: __it_set(d, 32); goto jmp_56;
	case 52: __it_set(d, 32); goto jmp_48;
	case 44: __it_set(d, 32); goto jmp_40;
	case 36: __it_set(d, 32); goto jmp_32;
	case 28: __it_set(d, 32); goto jmp_24;
	case 20: __it_set(d, 32); goto jmp_16;
	case 12: __it_set(d, 32); goto jmp_8;
	case  4: __it_set(d, 32);
		break;

	case 90: __it_set(d, 16); goto jmp_88;
	case 82: __it_set(d, 16); goto jmp_80;
	case 74: __it_set(d, 16); goto jmp_72;
	case 66: __it_set(d, 16); goto jmp_64;
	case 58: __it_set(d, 16); goto jmp_56;
	case 50: __it_set(d, 16); goto jmp_48;
	case 42: __it_set(d, 16); goto jmp_40;
	case 34: __it_set(d, 16); goto jmp_32;
	case 26: __it_set(d, 16); goto jmp_24;
	case 18: __it_set(d, 16); goto jmp_16;
	case 10: __it_set(d, 16); goto jmp_8;
	case  2: __it_set(d, 16);
		break;

	case  1: __it_set(d, 8);
		break;
	default:
		/* __builtin_memset() is crappy slow since it cannot
		 * make any assumptions about alignment & underlying
		 * efficient unaligned access on the target we're
		 * running.
		 */
		__throw_build_bug();
	}
#else
	__bpf_memset_builtin(d, 0, len);
#endif
}
/* Stand-in that turns any direct __builtin_memset() use into a build
 * error; all callers must go through memset() below. */
static __always_inline __maybe_unused void
__bpf_no_builtin_memset(void *d __maybe_unused, __u8 c __maybe_unused,
			__u64 len __maybe_unused)
{
	__throw_build_bug();
}

/* Redirect any direct use in our code to throw an error. */
#define __builtin_memset __bpf_no_builtin_memset

/* memset() replacement: route the common constant-zero case to the
 * unrolled __bpf_memzero(), everything else to the real builtin.
 * __nobuiltin stops clang from turning this back into a memset() call. */
static __always_inline __nobuiltin("memset") void memset(void *d, int c,
							 __u64 len)
{
	if (__builtin_constant_p(len) && __builtin_constant_p(c) && c == 0)
		__bpf_memzero(d, len);
	else
		__bpf_memset_builtin(d, c, len);
}
/* Fallback for the cases the unrolled __bpf_memcpy() does not cover. */
static __always_inline __maybe_unused void
__bpf_memcpy_builtin(void *d, const void *s, __u64 len)
{
	/* Explicit opt-in for __builtin_memcpy(). */
	__builtin_memcpy(d, s, len);
}
/* Copy a compile-time-constant number of bytes (up to 288) as an unrolled
 * sequence of fixed-width loads/stores, walking backwards from the end of
 * both buffers.  Same fall-through/goto ladder scheme as __bpf_memzero():
 * each even length shares the tail of the next-smaller multiple of 8.
 * Non-constant, odd (> 1) or oversized lengths fail the build.
 */
static __always_inline void __bpf_memcpy(void *d, const void *s, __u64 len)
{
#if __clang_major__ >= 10
	if (!__builtin_constant_p(len))
		__throw_build_bug();

	d += len;	/* backward iterators pre-decrement from the end */
	s += len;

	switch (len) {
	case 288: __it_mob(d, s, 64);
	case 280: jmp_280: __it_mob(d, s, 64);
	case 272: jmp_272: __it_mob(d, s, 64);
	case 264: jmp_264: __it_mob(d, s, 64);
	case 256: jmp_256: __it_mob(d, s, 64);
	case 248: jmp_248: __it_mob(d, s, 64);
	case 240: jmp_240: __it_mob(d, s, 64);
	case 232: jmp_232: __it_mob(d, s, 64);
	case 224: jmp_224: __it_mob(d, s, 64);
	case 216: jmp_216: __it_mob(d, s, 64);
	case 208: jmp_208: __it_mob(d, s, 64);
	case 200: jmp_200: __it_mob(d, s, 64);
	case 192: jmp_192: __it_mob(d, s, 64);
	case 184: jmp_184: __it_mob(d, s, 64);
	case 176: jmp_176: __it_mob(d, s, 64);
	case 168: jmp_168: __it_mob(d, s, 64);
	case 160: jmp_160: __it_mob(d, s, 64);
	case 152: jmp_152: __it_mob(d, s, 64);
	case 144: jmp_144: __it_mob(d, s, 64);
	case 136: jmp_136: __it_mob(d, s, 64);
	case 128: jmp_128: __it_mob(d, s, 64);
	case 120: jmp_120: __it_mob(d, s, 64);
	case 112: jmp_112: __it_mob(d, s, 64);
	case 104: jmp_104: __it_mob(d, s, 64);
	case  96: jmp_96: __it_mob(d, s, 64);
	case  88: jmp_88: __it_mob(d, s, 64);
	case  80: jmp_80: __it_mob(d, s, 64);
	case  72: jmp_72: __it_mob(d, s, 64);
	case  64: jmp_64: __it_mob(d, s, 64);
	case  56: jmp_56: __it_mob(d, s, 64);
	case  48: jmp_48: __it_mob(d, s, 64);
	case  40: jmp_40: __it_mob(d, s, 64);
	case  32: jmp_32: __it_mob(d, s, 64);
	case  24: jmp_24: __it_mob(d, s, 64);
	case  16: jmp_16: __it_mob(d, s, 64);
	case   8: jmp_8: __it_mob(d, s, 64);
		break;

	case 286: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_280;
	case 278: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_272;
	case 270: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_264;
	case 262: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_256;
	case 254: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_248;
	case 246: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_240;
	case 238: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_232;
	case 230: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_224;
	case 222: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_216;
	case 214: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_208;
	case 206: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_200;
	case 198: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_192;
	case 190: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_184;
	case 182: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_176;
	case 174: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_168;
	case 166: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_160;
	case 158: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_152;
	case 150: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_144;
	case 142: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_136;
	case 134: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_128;
	case 126: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_120;
	case 118: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_112;
	case 110: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_104;
	case 102: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_96;
	case  94: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_88;
	case  86: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_80;
	case  78: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_72;
	case  70: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_64;
	case  62: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_56;
	case  54: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_48;
	case  46: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_40;
	case  38: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_32;
	case  30: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_24;
	case  22: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_16;
	case  14: __it_mob(d, s, 16); __it_mob(d, s, 32); goto jmp_8;
	case   6: __it_mob(d, s, 16); __it_mob(d, s, 32);
		break;

	case 284: __it_mob(d, s, 32); goto jmp_280;
	case 276: __it_mob(d, s, 32); goto jmp_272;
	case 268: __it_mob(d, s, 32); goto jmp_264;
	case 260: __it_mob(d, s, 32); goto jmp_256;
	case 252: __it_mob(d, s, 32); goto jmp_248;
	case 244: __it_mob(d, s, 32); goto jmp_240;
	case 236: __it_mob(d, s, 32); goto jmp_232;
	case 228: __it_mob(d, s, 32); goto jmp_224;
	case 220: __it_mob(d, s, 32); goto jmp_216;
	case 212: __it_mob(d, s, 32); goto jmp_208;
	case 204: __it_mob(d, s, 32); goto jmp_200;
	case 196: __it_mob(d, s, 32); goto jmp_192;
	case 188: __it_mob(d, s, 32); goto jmp_184;
	case 180: __it_mob(d, s, 32); goto jmp_176;
	case 172: __it_mob(d, s, 32); goto jmp_168;
	case 164: __it_mob(d, s, 32); goto jmp_160;
	case 156: __it_mob(d, s, 32); goto jmp_152;
	case 148: __it_mob(d, s, 32); goto jmp_144;
	case 140: __it_mob(d, s, 32); goto jmp_136;
	case 132: __it_mob(d, s, 32); goto jmp_128;
	case 124: __it_mob(d, s, 32); goto jmp_120;
	case 116: __it_mob(d, s, 32); goto jmp_112;
	case 108: __it_mob(d, s, 32); goto jmp_104;
	case 100: __it_mob(d, s, 32); goto jmp_96;
	case  92: __it_mob(d, s, 32); goto jmp_88;
	case  84: __it_mob(d, s, 32); goto jmp_80;
	case  76: __it_mob(d, s, 32); goto jmp_72;
	case  68: __it_mob(d, s, 32); goto jmp_64;
	case  60: __it_mob(d, s, 32); goto jmp_56;
	case  52: __it_mob(d, s, 32); goto jmp_48;
	case  44: __it_mob(d, s, 32); goto jmp_40;
	case  36: __it_mob(d, s, 32); goto jmp_32;
	case  28: __it_mob(d, s, 32); goto jmp_24;
	case  20: __it_mob(d, s, 32); goto jmp_16;
	case  12: __it_mob(d, s, 32); goto jmp_8;
	case   4: __it_mob(d, s, 32);
		break;

	case 282: __it_mob(d, s, 16); goto jmp_280;
	case 274: __it_mob(d, s, 16); goto jmp_272;
	case 266: __it_mob(d, s, 16); goto jmp_264;
	case 258: __it_mob(d, s, 16); goto jmp_256;
	case 250: __it_mob(d, s, 16); goto jmp_248;
	case 242: __it_mob(d, s, 16); goto jmp_240;
	case 234: __it_mob(d, s, 16); goto jmp_232;
	case 226: __it_mob(d, s, 16); goto jmp_224;
	case 218: __it_mob(d, s, 16); goto jmp_216;
	case 210: __it_mob(d, s, 16); goto jmp_208;
	case 202: __it_mob(d, s, 16); goto jmp_200;
	case 194: __it_mob(d, s, 16); goto jmp_192;
	case 186: __it_mob(d, s, 16); goto jmp_184;
	case 178: __it_mob(d, s, 16); goto jmp_176;
	case 170: __it_mob(d, s, 16); goto jmp_168;
	case 162: __it_mob(d, s, 16); goto jmp_160;
	case 154: __it_mob(d, s, 16); goto jmp_152;
	case 146: __it_mob(d, s, 16); goto jmp_144;
	case 138: __it_mob(d, s, 16); goto jmp_136;
	case 130: __it_mob(d, s, 16); goto jmp_128;
	case 122: __it_mob(d, s, 16); goto jmp_120;
	case 114: __it_mob(d, s, 16); goto jmp_112;
	case 106: __it_mob(d, s, 16); goto jmp_104;
	case  98: __it_mob(d, s, 16); goto jmp_96;
	case  90: __it_mob(d, s, 16); goto jmp_88;
	case  82: __it_mob(d, s, 16); goto jmp_80;
	case  74: __it_mob(d, s, 16); goto jmp_72;
	case  66: __it_mob(d, s, 16); goto jmp_64;
	case  58: __it_mob(d, s, 16); goto jmp_56;
	case  50: __it_mob(d, s, 16); goto jmp_48;
	case  42: __it_mob(d, s, 16); goto jmp_40;
	case  34: __it_mob(d, s, 16); goto jmp_32;
	case  26: __it_mob(d, s, 16); goto jmp_24;
	case  18: __it_mob(d, s, 16); goto jmp_16;
	case  10: __it_mob(d, s, 16); goto jmp_8;
	case   2: __it_mob(d, s, 16);
		break;

	case   1: __it_mob(d, s, 8);
		break;
	default:
		/* __builtin_memcpy() is crappy slow since it cannot
		 * make any assumptions about alignment & underlying
		 * efficient unaligned access on the target we're
		 * running.
		 */
		__throw_build_bug();
	}
#else
	__bpf_memcpy_builtin(d, s, len);
#endif
}
/* Stand-in that turns any direct __builtin_memcpy() use into a build
 * error; all callers must go through memcpy() below. */
static __always_inline __maybe_unused void
__bpf_no_builtin_memcpy(void *d __maybe_unused, const void *s __maybe_unused,
			__u64 len __maybe_unused)
{
	__throw_build_bug();
}

/* Redirect any direct use in our code to throw an error. */
#define __builtin_memcpy __bpf_no_builtin_memcpy

/* memcpy() replacement built on the unrolled __bpf_memcpy(); __nobuiltin
 * stops clang from turning the body back into a memcpy() call. */
static __always_inline __nobuiltin("memcpy") void memcpy(void *d, const void *s,
							 __u64 len)
{
	return __bpf_memcpy(d, s, len);
}
/* Fallback compare for the cases the unrolled __bpf_memcmp() does not
 * cover; only reports equal/non-equal, not ordering. */
static __always_inline __maybe_unused __u64
__bpf_memcmp_builtin(const void *x, const void *y, __u64 len)
{
	/* Explicit opt-in for __builtin_memcmp(). We use the bcmp builtin
	 * here for two reasons: i) we only need to know equal or non-equal
	 * similar as in __bpf_memcmp(), and ii) if __bpf_memcmp() ends up
	 * selecting __bpf_memcmp_builtin(), clang generates a memcmp loop.
	 * That is, (*) -> __bpf_memcmp() -> __bpf_memcmp_builtin() ->
	 * __builtin_memcmp() -> memcmp() -> (*), meaning it will end up
	 * selecting our memcmp() from here. Remapping to __builtin_bcmp()
	 * breaks this loop and resolves both needs at once.
	 */
	return __builtin_bcmp(x, y, len);
}
/* Compare a compile-time-constant number of bytes (up to 32) by XOR-ing
 * fixed-width words into r, walking backwards from the end of both
 * buffers.  Returns 0 iff the buffers are equal; any non-zero value means
 * different (no ordering).  Same fall-through/goto ladder as above.
 */
static __always_inline __u64 __bpf_memcmp(const void *x, const void *y,
					  __u64 len)
{
#if __clang_major__ >= 10
	__u64 r = 0;

	if (!__builtin_constant_p(len))
		__throw_build_bug();

	x += len;	/* backward iterators pre-decrement from the end */
	y += len;

	switch (len) {
	case 32: __it_xor(x, y, r, 64);
	case 24: jmp_24: __it_xor(x, y, r, 64);
	case 16: jmp_16: __it_xor(x, y, r, 64);
	case  8: jmp_8: __it_xor(x, y, r, 64);
		break;

	case 30: __it_xor(x, y, r, 16); __it_xor(x, y, r, 32); goto jmp_24;
	case 22: __it_xor(x, y, r, 16); __it_xor(x, y, r, 32); goto jmp_16;
	case 14: __it_xor(x, y, r, 16); __it_xor(x, y, r, 32); goto jmp_8;
	case  6: __it_xor(x, y, r, 16); __it_xor(x, y, r, 32);
		break;

	case 28: __it_xor(x, y, r, 32); goto jmp_24;
	case 20: __it_xor(x, y, r, 32); goto jmp_16;
	case 12: __it_xor(x, y, r, 32); goto jmp_8;
	case  4: __it_xor(x, y, r, 32);
		break;

	case 26: __it_xor(x, y, r, 16); goto jmp_24;
	case 18: __it_xor(x, y, r, 16); goto jmp_16;
	case 10: __it_xor(x, y, r, 16); goto jmp_8;
	case  2: __it_xor(x, y, r, 16);
		break;

	case  1: __it_xor(x, y, r, 8);
		break;
	default:
		__throw_build_bug();
	}

	return r;
#else
	return __bpf_memcmp_builtin(x, y, len);
#endif
}
/* Stand-in that turns any direct __builtin_memcmp() use into a build
 * error; all callers must go through memcmp() below. */
static __always_inline __maybe_unused __u64
__bpf_no_builtin_memcmp(const void *x __maybe_unused,
			const void *y __maybe_unused, __u64 len __maybe_unused)
{
	__throw_build_bug();
	return 0;
}

/* Redirect any direct use in our code to throw an error. */
#define __builtin_memcmp __bpf_no_builtin_memcmp

/* Modified for our needs in that we only return either zero (x and y
 * are equal) or non-zero (x and y are non-equal) — NOT the sign/ordering
 * semantics of libc memcmp().
 */
static __always_inline __nobuiltin("memcmp") __u64 memcmp(const void *x,
							  const void *y,
							  __u64 len)
{
	return __bpf_memcmp(x, y, len);
}
/* Fallback for the cases the unrolled move helpers do not cover. */
static __always_inline __maybe_unused void
__bpf_memmove_builtin(void *d, const void *s, __u64 len)
{
	/* Explicit opt-in for __builtin_memmove(). */
	__builtin_memmove(d, s, len);
}

/* Overlap-safe move when d > s: copy back-to-front. */
static __always_inline void __bpf_memmove_bwd(void *d, const void *s, __u64 len)
{
	/* Our internal memcpy implementation walks backwards by default. */
	__bpf_memcpy(d, s, len);
}
/* Overlap-safe move when d <= s: copy front-to-back using the forward
 * iterator __it_mof.  Same constant-length fall-through/goto ladder as
 * __bpf_memzero(); unsupported lengths fail the build.
 */
static __always_inline void __bpf_memmove_fwd(void *d, const void *s, __u64 len)
{
#if __clang_major__ >= 10
	if (!__builtin_constant_p(len))
		__throw_build_bug();

	switch (len) {
	case 96: __it_mof(d, s, 64);
	case 88: jmp_88: __it_mof(d, s, 64);
	case 80: jmp_80: __it_mof(d, s, 64);
	case 72: jmp_72: __it_mof(d, s, 64);
	case 64: jmp_64: __it_mof(d, s, 64);
	case 56: jmp_56: __it_mof(d, s, 64);
	case 48: jmp_48: __it_mof(d, s, 64);
	case 40: jmp_40: __it_mof(d, s, 64);
	case 32: jmp_32: __it_mof(d, s, 64);
	case 24: jmp_24: __it_mof(d, s, 64);
	case 16: jmp_16: __it_mof(d, s, 64);
	case  8: jmp_8: __it_mof(d, s, 64);
		break;

	case 94: __it_mof(d, s, 16); __it_mof(d, s, 32); goto jmp_88;
	case 86: __it_mof(d, s, 16); __it_mof(d, s, 32); goto jmp_80;
	case 78: __it_mof(d, s, 16); __it_mof(d, s, 32); goto jmp_72;
	case 70: __it_mof(d, s, 16); __it_mof(d, s, 32); goto jmp_64;
	case 62: __it_mof(d, s, 16); __it_mof(d, s, 32); goto jmp_56;
	case 54: __it_mof(d, s, 16); __it_mof(d, s, 32); goto jmp_48;
	case 46: __it_mof(d, s, 16); __it_mof(d, s, 32); goto jmp_40;
	case 38: __it_mof(d, s, 16); __it_mof(d, s, 32); goto jmp_32;
	case 30: __it_mof(d, s, 16); __it_mof(d, s, 32); goto jmp_24;
	case 22: __it_mof(d, s, 16); __it_mof(d, s, 32); goto jmp_16;
	case 14: __it_mof(d, s, 16); __it_mof(d, s, 32); goto jmp_8;
	case  6: __it_mof(d, s, 16); __it_mof(d, s, 32);
		break;

	case 92: __it_mof(d, s, 32); goto jmp_88;
	case 84: __it_mof(d, s, 32); goto jmp_80;
	case 76: __it_mof(d, s, 32); goto jmp_72;
	case 68: __it_mof(d, s, 32); goto jmp_64;
	case 60: __it_mof(d, s, 32); goto jmp_56;
	case 52: __it_mof(d, s, 32); goto jmp_48;
	case 44: __it_mof(d, s, 32); goto jmp_40;
	case 36: __it_mof(d, s, 32); goto jmp_32;
	case 28: __it_mof(d, s, 32); goto jmp_24;
	case 20: __it_mof(d, s, 32); goto jmp_16;
	case 12: __it_mof(d, s, 32); goto jmp_8;
	case  4: __it_mof(d, s, 32);
		break;

	case 90: __it_mof(d, s, 16); goto jmp_88;
	case 82: __it_mof(d, s, 16); goto jmp_80;
	case 74: __it_mof(d, s, 16); goto jmp_72;
	case 66: __it_mof(d, s, 16); goto jmp_64;
	case 58: __it_mof(d, s, 16); goto jmp_56;
	case 50: __it_mof(d, s, 16); goto jmp_48;
	case 42: __it_mof(d, s, 16); goto jmp_40;
	case 34: __it_mof(d, s, 16); goto jmp_32;
	case 26: __it_mof(d, s, 16); goto jmp_24;
	case 18: __it_mof(d, s, 16); goto jmp_16;
	case 10: __it_mof(d, s, 16); goto jmp_8;
	case  2: __it_mof(d, s, 16);
		break;

	case  1: __it_mof(d, s, 8);
		break;
	default:
		/* __builtin_memmove() is crappy slow since it cannot
		 * make any assumptions about alignment & underlying
		 * efficient unaligned access on the target we're
		 * running.
		 */
		__throw_build_bug();
	}
#else
	__bpf_memmove_builtin(d, s, len);
#endif
}
/* Stand-in that turns any direct __builtin_memmove() use into a build
 * error; all callers must go through memmove() below. */
static __always_inline __maybe_unused void
__bpf_no_builtin_memmove(void *d __maybe_unused, const void *s __maybe_unused,
			 __u64 len __maybe_unused)
{
	__throw_build_bug();
}

/* Redirect any direct use in our code to throw an error. */
#define __builtin_memmove __bpf_no_builtin_memmove

/* Pick a copy direction that is safe for overlapping buffers: forward
 * when the destination starts at or before the source, backward otherwise. */
static __always_inline void __bpf_memmove(void *d, const void *s, __u64 len)
{
	/* Note, the forward walking memmove() might not work with on-stack data
	 * since we'll end up walking the memory unaligned even when __align_stack_8
	 * is set. Should not matter much since we'll use memmove() mostly or only
	 * on pkt data.
	 *
	 * Example with d, s, len = 12 bytes:
	 *   * __bpf_memmove_fwd() emits: mov_32 d[0],s[0]; mov_64 d[4],s[4]
	 *   * __bpf_memmove_bwd() emits: mov_32 d[8],s[8]; mov_64 d[0],s[0]
	 */
	if (d <= s)
		return __bpf_memmove_fwd(d, s, len);
	else
		return __bpf_memmove_bwd(d, s, len);
}

/* memmove() replacement; __nobuiltin stops clang from turning the body
 * back into a memmove() call. */
static __always_inline __nobuiltin("memmove") void memmove(void *d,
							   const void *s,
							   __u64 len)
{
	return __bpf_memmove(d, s, len);
}
#endif /* __non_bpf_context */
#endif /* __BPF_BUILTINS__ */

124
include/bpf/compiler.h Normal file
View File

@ -0,0 +1,124 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2016-2020 Authors of Cilium */
#ifndef __BPF_COMPILER_H_
#define __BPF_COMPILER_H_
#ifndef __non_bpf_context
# include "stddef.h"
#endif

/* Place a symbol in a named ELF section and keep it even if unreferenced. */
#ifndef __section
# define __section(X)		__attribute__((section(X), used))
#endif

#ifndef __maybe_unused
# define __maybe_unused		__attribute__((__unused__))
#endif

#ifndef offsetof
# define offsetof(T, M)		__builtin_offsetof(T, M)
#endif

/* sizeof() of a struct member without needing an instance. */
#ifndef field_sizeof
# define field_sizeof(T, M)	sizeof((((T *)NULL)->M))
#endif

#ifndef __packed
# define __packed		__attribute__((packed))
#endif

/* Forbid clang (>= 10) from emitting calls to the named libc builtin
 * inside the annotated function; used by builtins.h so the mem*()
 * replacements cannot recurse into themselves.  No-op on older clang. */
#ifndef __nobuiltin
# if __clang_major__ >= 10
#  define __nobuiltin(X)	__attribute__((no_builtin(X)))
# else
#  define __nobuiltin(X)
# endif
#endif

#ifndef likely
# define likely(X)		__builtin_expect(!!(X), 1)
#endif

#ifndef unlikely
# define unlikely(X)		__builtin_expect(!!(X), 0)
#endif

#ifndef always_succeeds /* Mainly for documentation purpose. */
# define always_succeeds(X)	likely(X)
#endif

#undef __always_inline /* stddef.h defines its own */
#define __always_inline		inline __attribute__((always_inline))

#ifndef __stringify
# define __stringify(X)		#X
#endif

/* Address of X truncated to 32 bits. */
#ifndef __fetch
# define __fetch(X)		(__u32)(__u64)(&(X))
#endif

#ifndef __aligned
# define __aligned(X)		__attribute__((aligned(X)))
#endif

/* Compile-time assertion: a negative array size if E is true. */
#ifndef build_bug_on
# define build_bug_on(E)	((void)sizeof(char[1 - 2*!!(E)]))
#endif

/* Deliberately emit an unreachable trap so unsupported uses (see
 * builtins.h) fail at build time rather than silently misbehave. */
#ifndef __throw_build_bug
# define __throw_build_bug()	__builtin_trap()
#endif

#ifndef __printf
# define __printf(X, Y)		__attribute__((__format__(printf, X, Y)))
#endif

/* Compiler-level memory barriers (no code emitted, just an optimization
 * fence). */
#ifndef barrier
# define barrier()		asm volatile("": : :"memory")
#endif

#ifndef barrier_data
# define barrier_data(ptr)	asm volatile("": :"r"(ptr) :"memory")
#endif

static __always_inline void bpf_barrier(void)
{
	/* Workaround to avoid verifier complaint:
	 * "dereference of modified ctx ptr R5 off=48+0, ctx+const is allowed,
	 * ctx+const+const is not"
	 */
	barrier();
}

#ifndef ARRAY_SIZE
# define ARRAY_SIZE(A)		(sizeof(A) / sizeof((A)[0]))
#endif

/* Single, non-tearing access through a volatile-qualified lvalue. */
#ifndef __READ_ONCE
# define __READ_ONCE(X)		(*(volatile typeof(X) *)&X)
#endif

#ifndef __WRITE_ONCE
# define __WRITE_ONCE(X, V)	(*(volatile typeof(X) *)&X) = (V)
#endif

/* {READ,WRITE}_ONCE() with verifier workaround via bpf_barrier(). */
#ifndef READ_ONCE
# define READ_ONCE(X)						\
	({ typeof(X) __val = __READ_ONCE(X);			\
	   bpf_barrier();					\
	   __val; })
#endif

#ifndef WRITE_ONCE
# define WRITE_ONCE(X, V)					\
	({ typeof(X) __val = (V);				\
	   __WRITE_ONCE(X, __val);				\
	   bpf_barrier();					\
	   __val; })
#endif
#endif /* __BPF_COMPILER_H_ */

37
include/bpf/errno.h Normal file
View File

@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2016-2020 Authors of Cilium */
#ifndef __BPF_ERRNO__
#define __BPF_ERRNO__
/* Few basic errno codes as we don't want to include errno.h. */
#ifndef EPERM
# define EPERM		1	/* Operation not permitted */
#endif
#ifndef ENOENT
# define ENOENT		2	/* No such file or directory */
#endif
#ifndef ENXIO
# define ENXIO		6	/* No such device or address */
#endif
#ifndef ENOMEM
# define ENOMEM		12	/* Out of memory */
#endif
#ifndef EFAULT
# define EFAULT		14	/* Bad address */
#endif
#ifndef EINVAL
# define EINVAL		22	/* Invalid argument */
#endif
#ifndef ENOTSUP
# define ENOTSUP	95	/* Operation not supported */
#endif
#ifndef EADDRINUSE
# define EADDRINUSE	98	/* Address already in use */
#endif
#ifndef ENOTSUPP
# define ENOTSUPP	524	/* Kernel-internal code, not part of uapi errno.h */
#endif
#endif /* __BPF_ERRNO__ */

172
include/jhash.h Normal file
View File

@ -0,0 +1,172 @@
#ifndef _LINUX_JHASH_H
#define _LINUX_JHASH_H
/* Copied from $(LINUX)/include/linux/jhash.h (kernel 4.18) */
/* jhash.h: Jenkins hash support.
*
* Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
*
* http://burtleburtle.net/bob/hash/
*
* These are the credits from Bob's sources:
*
* lookup3.c, by Bob Jenkins, May 2006, Public Domain.
*
* These are functions for producing 32-bit hashes for hash table lookup.
* hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
* are externally useful functions. Routines to test the hash are included
* if SELF_TEST is defined. You can use this free for any purpose. It's in
* the public domain. It has no warranty.
*
* Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
*/
/* Bitwise left-rotation of a 32-bit word. */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

/* copy paste of jhash from kernel sources (include/linux/jhash.h) to make sure
 * LLVM can compile it into valid sequence of BPF instructions
 */
#define __jhash_mix(a, b, c)			\
{						\
	a -= c; a ^= rol32(c, 4); c += b;	\
	b -= a; b ^= rol32(a, 6); a += c;	\
	c -= b; c ^= rol32(b, 8); b += a;	\
	a -= c; a ^= rol32(c, 16); c += b;	\
	b -= a; b ^= rol32(a, 19); a += c;	\
	c -= b; c ^= rol32(b, 4); b += a;	\
}

#define __jhash_final(a, b, c)			\
{						\
	c ^= b; c -= rol32(b, 14);		\
	a ^= c; a -= rol32(c, 11);		\
	b ^= a; b -= rol32(a, 25);		\
	c ^= b; c -= rol32(b, 16);		\
	a ^= c; a -= rol32(c, 4);		\
	b ^= a; b -= rol32(a, 14);		\
	c ^= b; c -= rol32(b, 24);		\
}

#define JHASH_INITVAL		0xdeadbeef

typedef unsigned int u32;

/* jhash - hash an arbitrary key
 * @key: sequence of bytes to hash
 * @length: number of bytes in @key
 * @initval: the previous hash, or an arbitrary seed value
 *
 * Generic byte-oriented version: no alignment or length assumptions are
 * made about the input key.
 *
 * Returns the 32-bit hash of the key; the result depends on endianness.
 */
static inline u32 jhash(const void *key, u32 length, u32 initval)
{
	const unsigned char *p = key;
	u32 a, b, c;

	/* Seed the internal state. */
	a = b = c = JHASH_INITVAL + length + initval;

	/* Mix in every full 12-byte block. */
	for (; length > 12; length -= 12, p += 12) {
		a += *(u32 *)(p);
		b += *(u32 *)(p + 4);
		c += *(u32 *)(p + 8);
		__jhash_mix(a, b, c);
	}

	/* Fold in the trailing 0-12 bytes; cases deliberately fall through
	 * so each length accumulates all remaining bytes before the final
	 * avalanche. */
	switch (length) {
	case 12: c += (u32)p[11]<<24;	/* fall through */
	case 11: c += (u32)p[10]<<16;	/* fall through */
	case 10: c += (u32)p[9]<<8;	/* fall through */
	case 9:  c += p[8];		/* fall through */
	case 8:  b += (u32)p[7]<<24;	/* fall through */
	case 7:  b += (u32)p[6]<<16;	/* fall through */
	case 6:  b += (u32)p[5]<<8;	/* fall through */
	case 5:  b += p[4];		/* fall through */
	case 4:  a += (u32)p[3]<<24;	/* fall through */
	case 3:  a += (u32)p[2]<<16;	/* fall through */
	case 2:  a += (u32)p[1]<<8;	/* fall through */
	case 1:  a += p[0];
		 __jhash_final(a, b, c);
	case 0: /* Nothing left to add */
		break;
	}

	return c;
}
/* jhash2 - hash an array of u32's
 * @k: the key which must be an array of u32's
 * @length: the number of u32's in the key
 * @initval: the previous hash, or an arbitrary value
 *
 * Word-aligned variant of jhash(); same mixing schedule, but the input is
 * consumed three u32's at a time.  Returns the hash value of the key.
 */
static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
{
	u32 x, y, z;

	/* Seed the state; length << 2 is the key size in bytes. */
	x = y = z = JHASH_INITVAL + (length << 2) + initval;

	/* Consume full 3-word groups. */
	for (; length > 3; length -= 3, k += 3) {
		x += k[0];
		y += k[1];
		z += k[2];
		__jhash_mix(x, y, z);
	}

	/* Fold in the last 0-3 words; cases deliberately fall through. */
	switch (length) {
	case 3: z += k[2]; /* fall through */
	case 2: y += k[1]; /* fall through */
	case 1: x += k[0];
		__jhash_final(x, y, z);
	case 0: /* Nothing left to add */
		break;
	}

	return z;
}
/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
/* All three lanes are pre-seeded with @initval (callers below fold
 * JHASH_INITVAL plus the byte count into it), then collapsed with the
 * final mixing step.  The result comes out of lane c.
 */
static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
	a += initval;
	b += initval;
	c += initval;
	__jhash_final(a, b, c);
	return c;
}

/* Hash three words; (3 << 2) mirrors jhash2()'s "length in bytes" seed. */
static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
	return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
}

/* Hash two words; unused lane is zero. */
static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}

/* Hash a single word; unused lanes are zero. */
static inline u32 jhash_1word(u32 a, u32 initval)
{
	return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
}
#endif /* _LINUX_JHASH_H */

View File

@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020 Authors of Cilium */
/* For now, only import the xdp_load_bytes() and xdp_store_bytes() helpers
* from Cilium.
*/
#ifndef __XDP_CONTEXT_HELPERS_H
#define __XDP_CONTEXT_HELPERS_H
#include <linux/types.h>
#include <linux/bpf.h>
#include "../bpf/builtins.h"
#include "../bpf/errno.h"
/* This must be a mask and all offsets guaranteed to be less than that. */
#define __CTX_OFF_MAX 0x3fff
/* xdp_load_bytes() - copy @len bytes from packet offset @off into @to.
 *
 * Returns 0 on success, or -EINVAL when [off, off + len) does not fit
 * inside the packet (ctx->data .. ctx->data_end).  The bounds check is
 * written in hand-rolled BPF assembly so the verifier sees an explicit
 * range on the packet pointer before the memcpy().
 *
 * NOTE(review): the asm masks @off with __CTX_OFF_MAX, so offsets above
 * 0x3fff are silently truncated rather than rejected -- callers must keep
 * offsets within that bound.  The asm also modifies the input operands
 * off/len in place; this mirrors the upstream Cilium code.
 */
static __always_inline __maybe_unused int
xdp_load_bytes(const struct xdp_md *ctx, __u64 off, void *to, const __u64 len)
{
	void *from;
	int ret;
	/* LLVM tends to generate code that verifier doesn't understand,
	 * so force it the way we want it in order to open up a range
	 * on the reg.
	 */
	asm volatile("r1 = *(u32 *)(%[ctx] +0)\n\t"	/* r1 = ctx->data */
		     "r2 = *(u32 *)(%[ctx] +4)\n\t"	/* r2 = ctx->data_end */
		     "%[off] &= %[offmax]\n\t"		/* clamp offset to 14 bits */
		     "r1 += %[off]\n\t"			/* r1 = data + off */
		     "%[from] = r1\n\t"
		     "r1 += %[len]\n\t"			/* r1 = end of requested range */
		     "if r1 > r2 goto +2\n\t"		/* out of bounds? */
		     "%[ret] = 0\n\t"
		     "goto +1\n\t"
		     "%[ret] = %[errno]\n\t"
		     : [ret]"=r"(ret), [from]"=r"(from)
		     : [ctx]"r"(ctx), [off]"r"(off), [len]"ri"(len),
		       [offmax]"i"(__CTX_OFF_MAX), [errno]"i"(-EINVAL)
		     : "r1", "r2");
	/* Only copy once the verifier-visible bounds check has passed. */
	if (!ret)
		memcpy(to, from, len);
	return ret;
}
/* xdp_store_bytes() - copy @len bytes from @from into the packet at @off.
 *
 * Mirror image of xdp_load_bytes(): returns 0 on success or -EINVAL when
 * the destination range is outside the packet bounds.  @flags is accepted
 * for API symmetry but unused.  The same __CTX_OFF_MAX truncation caveat
 * applies to @off (see xdp_load_bytes()).
 */
static __always_inline __maybe_unused int
xdp_store_bytes(const struct xdp_md *ctx, __u64 off, const void *from,
		const __u64 len, __u64 flags __maybe_unused)
{
	void *to;
	int ret;
	/* See xdp_load_bytes(). */
	asm volatile("r1 = *(u32 *)(%[ctx] +0)\n\t"	/* r1 = ctx->data */
		     "r2 = *(u32 *)(%[ctx] +4)\n\t"	/* r2 = ctx->data_end */
		     "%[off] &= %[offmax]\n\t"
		     "r1 += %[off]\n\t"
		     "%[to] = r1\n\t"
		     "r1 += %[len]\n\t"
		     "if r1 > r2 goto +2\n\t"
		     "%[ret] = 0\n\t"
		     "goto +1\n\t"
		     "%[ret] = %[errno]\n\t"
		     : [ret]"=r"(ret), [to]"=r"(to)
		     : [ctx]"r"(ctx), [off]"r"(off), [len]"ri"(len),
		       [offmax]"i"(__CTX_OFF_MAX), [errno]"i"(-EINVAL)
		     : "r1", "r2");
	/* Write into the packet only after the bounds check succeeded. */
	if (!ret)
		memcpy(to, from, len);
	return ret;
}
#endif

View File

@ -0,0 +1,320 @@
/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-clause) */
/*
* This file contains parsing functions that are used in the packetXX XDP
* programs. The functions are marked as __always_inline, and fully defined in
* this header file to be included in the BPF program.
*
* Each helper parses a packet header, including doing bounds checking, and
* returns the type of its contents if successful, and -1 otherwise.
*
* For Ethernet and IP headers, the content type is the type of the payload
* (h_proto for Ethernet, nexthdr for IPv6), for ICMP it is the ICMP type field.
* All return values are in host byte order.
*
* The versions of the functions included here are slightly expanded versions of
* the functions in the packet01 lesson. For instance, the Ethernet header
* parsing has support for parsing VLAN tags.
*/
#ifndef __PARSING_HELPERS_H
#define __PARSING_HELPERS_H
#include <stddef.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <bpf/bpf_endian.h>
/* Header cursor to keep track of current parsing position */
struct hdr_cursor {
	void *pos;	/* next byte to parse; advanced by each parse_*() helper */
};

/*
 * struct vlan_hdr - vlan header
 * @h_vlan_TCI: priority and VLAN ID
 * @h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_hdr {
	__be16 h_vlan_TCI;
	__be16 h_vlan_encapsulated_proto;
};

/*
 * Struct icmphdr_common represents the common part of the icmphdr and icmp6hdr
 * structures.
 */
struct icmphdr_common {
	__u8 type;
	__u8 code;
	__sum16 cksum;
};

/* Allow users of header file to redefine VLAN max depth */
#ifndef VLAN_MAX_DEPTH
#define VLAN_MAX_DEPTH 2
#endif

/* Longest chain of IPv6 extension headers to resolve */
#ifndef IPV6_EXT_MAX_CHAIN
#define IPV6_EXT_MAX_CHAIN 6
#endif

/* Low 12 bits of the TCI field hold the VLAN id itself. */
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
/* Struct for collecting VLANs after parsing via parse_ethhdr_vlan */
struct collect_vlans {
	/* id[0] is the outermost tag; unparsed slots are left untouched */
	__u16 id[VLAN_MAX_DEPTH];
};
/* Return 1 if @h_proto (network byte order) is one of the two VLAN
 * EtherTypes (802.1Q or 802.1ad), 0 otherwise.
 */
static __always_inline int proto_is_vlan(__u16 h_proto)
{
	return h_proto == bpf_htons(ETH_P_8021Q) ||
	       h_proto == bpf_htons(ETH_P_8021AD);
}
/* Notice, parse_ethhdr() will skip VLAN tags, by advancing nh->pos and returns
 * next header EtherType, BUT the ethhdr pointer supplied still points to the
 * Ethernet header. Thus, caller can look at eth->h_proto to see if this was a
 * VLAN tagged packet.
 *
 * If @vlans is non-NULL the VLAN ids encountered are stored there, outermost
 * first.  Returns the EtherType following the last VLAN tag, or -1 if the
 * packet is too short to hold an Ethernet header.
 */
static __always_inline int parse_ethhdr_vlan(struct hdr_cursor *nh,
					     void *data_end,
					     struct ethhdr **ethhdr,
					     struct collect_vlans *vlans)
{
	struct ethhdr *eth = nh->pos;
	int hdrsize = sizeof(*eth);
	struct vlan_hdr *vlh;
	__u16 h_proto;
	int i;

	/* Byte-count bounds check; check if current pointer + size of header
	 * is after data_end.
	 */
	if (nh->pos + hdrsize > data_end)
		return -1;

	nh->pos += hdrsize;
	*ethhdr = eth;
	vlh = nh->pos;
	h_proto = eth->h_proto;

	/* Use loop unrolling to avoid the verifier restriction on loops;
	 * support up to VLAN_MAX_DEPTH layers of VLAN encapsulation.
	 */
	#pragma unroll
	for (i = 0; i < VLAN_MAX_DEPTH; i++) {
		/* Stop at the first EtherType that is not a VLAN tag. */
		if (!proto_is_vlan(h_proto))
			break;

		/* Truncated tag: stop here; nh->pos ends up at the partial
		 * tag and the last complete EtherType is returned. */
		if (vlh + 1 > data_end)
			break;

		h_proto = vlh->h_vlan_encapsulated_proto;
		if (vlans) /* collect VLAN ids */
			vlans->id[i] =
				(bpf_ntohs(vlh->h_vlan_TCI) & VLAN_VID_MASK);

		vlh++;
	}

	nh->pos = vlh;
	/* NOTE(review): despite the file header's "host byte order" claim,
	 * this value is returned in network byte order. */
	return h_proto; /* network-byte-order */
}
/* Convenience wrapper around parse_ethhdr_vlan() for callers that do not
 * need the VLAN ids themselves; tags are still skipped over.
 */
static __always_inline int parse_ethhdr(struct hdr_cursor *nh,
					void *data_end,
					struct ethhdr **ethhdr)
{
	/* Expect compiler removes the code that collects VLAN ids */
	return parse_ethhdr_vlan(nh, data_end, ethhdr, NULL);
}
/* Walk the IPv6 extension header chain starting at nh->pos, advancing the
 * cursor past each known extension header, and return the first upper-layer
 * header type found.  The loop is bounded at IPV6_EXT_MAX_CHAIN iterations
 * so the BPF verifier can prove termination; returns -1 on a longer chain
 * or a truncated header.  Each advance is re-bounds-checked on the next
 * iteration before the header is read.
 */
static __always_inline int skip_ip6hdrext(struct hdr_cursor *nh,
					  void *data_end,
					  __u8 next_hdr_type)
{
	for (int i = 0; i < IPV6_EXT_MAX_CHAIN; ++i) {
		struct ipv6_opt_hdr *hdr = nh->pos;

		if (hdr + 1 > data_end)
			return -1;

		switch (next_hdr_type) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_DSTOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_MH:
			/* hdrlen counts 8-octet units, not including the
			 * first 8 octets (RFC 8200). */
			nh->pos = (char *)hdr + (hdr->hdrlen + 1) * 8;
			next_hdr_type = hdr->nexthdr;
			break;
		case IPPROTO_AH:
			/* AH length is in 4-octet units, minus 2 (RFC 4302). */
			nh->pos = (char *)hdr + (hdr->hdrlen + 2) * 4;
			next_hdr_type = hdr->nexthdr;
			break;
		case IPPROTO_FRAGMENT:
			/* Fragment header has a fixed 8-octet size. */
			nh->pos = (char *)hdr + 8;
			next_hdr_type = hdr->nexthdr;
			break;
		default:
			/* Found a header that is not an IPv6 extension header */
			return next_hdr_type;
		}
	}
	return -1;
}
/* parse_ip6hdr: parse a fixed-size IPv6 header, then skip any extension
 * headers.  On success *ip6hdr points at the IPv6 header, nh->pos is past
 * the extension chain, and the upper-layer protocol number is returned;
 * -1 on truncation or if the version field is not 6.
 */
static __always_inline int parse_ip6hdr(struct hdr_cursor *nh,
					void *data_end,
					struct ipv6hdr **ip6hdr)
{
	struct ipv6hdr *hdr = nh->pos;

	/* Pointer-arithmetic bounds check; pointer +1 points to after end of
	 * thing being pointed to. We will be using this style in the remainder
	 * of the tutorial.
	 */
	if (hdr + 1 > data_end)
		return -1;

	if (hdr->version != 6)
		return -1;

	*ip6hdr = hdr;
	nh->pos = hdr + 1;

	return skip_ip6hdrext(nh, data_end, hdr->nexthdr);
}
/* parse_iphdr: parse an IPv4 header, including any options, and return the
 * L4 protocol number (iph->protocol), or -1 on a truncated packet, a
 * non-IPv4 version field, or a bogus IHL.  On success nh->pos is advanced
 * past the full (variable-length) header and *iphdr is set.
 */
static __always_inline int parse_iphdr(struct hdr_cursor *nh,
				       void *data_end,
				       struct iphdr **iphdr)
{
	struct iphdr *iph = nh->pos;
	int hdrsize;

	if (iph + 1 > data_end)
		return -1;

	if (iph->version != 4)
		return -1;

	hdrsize = iph->ihl * 4;
	/* Sanity check packet field is valid: IHL must cover at least the
	 * fixed header.  Cast sizeof() to int so the comparison is signed;
	 * comparing int against size_t would be done unsigned. */
	if (hdrsize < (int)sizeof(*iph))
		return -1;

	/* Variable-length IPv4 header, need to use byte-based arithmetic */
	if (nh->pos + hdrsize > data_end)
		return -1;

	nh->pos += hdrsize;
	*iphdr = iph;

	return iph->protocol;
}
/* parse_icmp6hdr: parse an ICMPv6 header and return its type field, or -1
 * if the packet is too short.  Advances nh->pos past the header.
 */
static __always_inline int parse_icmp6hdr(struct hdr_cursor *nh,
					  void *data_end,
					  struct icmp6hdr **icmp6hdr)
{
	struct icmp6hdr *hdr = nh->pos;

	if (hdr + 1 > data_end)
		return -1;

	*icmp6hdr = hdr;
	nh->pos = hdr + 1;

	return hdr->icmp6_type;
}
/* parse_icmphdr: parse an ICMPv4 header and return its type field, or -1
 * if the packet is too short.  Advances nh->pos past the header.
 */
static __always_inline int parse_icmphdr(struct hdr_cursor *nh,
					 void *data_end,
					 struct icmphdr **icmphdr)
{
	struct icmphdr *hdr = nh->pos;

	if (hdr + 1 > data_end)
		return -1;

	*icmphdr = hdr;
	nh->pos = hdr + 1;

	return hdr->type;
}
/* parse_icmphdr_common: parse only the leading type/code/cksum fields that
 * ICMPv4 and ICMPv6 share, returning the type field or -1 on truncation.
 * Advances nh->pos past the common part only.
 */
static __always_inline int parse_icmphdr_common(struct hdr_cursor *nh,
						void *data_end,
						struct icmphdr_common **icmphdr)
{
	struct icmphdr_common *hdr = nh->pos;

	if (hdr + 1 > data_end)
		return -1;

	*icmphdr = hdr;
	nh->pos = hdr + 1;

	return hdr->type;
}
/*
 * parse_udphdr: parse the udp header and return the length of the udp payload
 * (h->len minus the header itself), or -1 on a truncated packet or a length
 * field smaller than the UDP header.  Advances nh->pos past the header.
 */
static __always_inline int parse_udphdr(struct hdr_cursor *nh,
					void *data_end,
					struct udphdr **udphdr)
{
	struct udphdr *hdr = nh->pos;
	int payload_len;

	if (hdr + 1 > data_end)
		return -1;

	*udphdr = hdr;
	nh->pos = hdr + 1;

	/* The UDP length field covers header + payload. */
	payload_len = bpf_ntohs(hdr->len) - sizeof(struct udphdr);
	if (payload_len < 0)
		return -1;

	return payload_len;
}
/*
 * parse_tcphdr: parse and return the length of the tcp header (including
 * options), or -1 on a truncated packet or a bogus data offset.  On success
 * nh->pos is advanced past the full header and *tcphdr is set.
 */
static __always_inline int parse_tcphdr(struct hdr_cursor *nh,
					void *data_end,
					struct tcphdr **tcphdr)
{
	int len;
	struct tcphdr *h = nh->pos;

	if (h + 1 > data_end)
		return -1;

	len = h->doff * 4;
	/* Sanity check packet field is valid: data offset must cover at
	 * least the fixed header.  Cast sizeof() to int so the comparison
	 * is signed rather than promoted to unsigned. */
	if (len < (int)sizeof(*h))
		return -1;

	/* Variable-length TCP header, need to use byte-based arithmetic */
	if (nh->pos + len > data_end)
		return -1;

	nh->pos += len;
	*tcphdr = h;

	return len;
}
#endif /* __PARSING_HELPERS_H */

51
lib/Makefile Normal file
View File

@ -0,0 +1,51 @@
# Top-level Makefile for the lib/ directory: builds the bundled libbpf
# submodule (unless SYSTEM_LIBBPF=y) and recurses into $(SUBDIRS).

# Force -fPIC so the static libbpf.a can be linked into shared objects;
# honour user CFLAGS when set.
LIBBPF_CFLAGS:=$(if $(CFLAGS),$(CFLAGS),-g -O2 -Werror -Wall) -fPIC

LIB_DIR = .

include defines.mk

SUBDIRS=

# Command targets, not files -- declare them phony so stray files with
# these names can never mask them.
.PHONY: all install libbpf

all: $(OBJECT_LIBBPF)
	@set -e; \
	for i in $(SUBDIRS); \
	do echo; echo " $$i"; $(MAKE) -C $$i; done

.PHONY: clean
clean: libbpf_clean
	@for i in $(SUBDIRS); \
	do $(MAKE) -C $$i clean; done

# NOTE(review): hard-codes libxdp/ and testing/ subdirs -- confirm both
# exist in this tree before relying on `make install`.
install:
	install -m 0755 -d $(DESTDIR)$(HDRDIR)
	$(MAKE) -C libxdp install
	$(MAKE) -C testing install

libbpf: $(OBJECT_LIBBPF)

# Handle libbpf as git submodule
ifeq ($(SYSTEM_LIBBPF),n)
ifeq ($(VERBOSE),0)
P:= >/dev/null
endif

# Detect submodule libbpf source file changes
LIBBPF_SOURCES := $(wildcard libbpf/src/*.[ch])
LIBBPF_INSTALL := libbpf-install
INSTDIR=../../$(LIBBPF_INSTALL)

.PHONY: libbpf_clean
libbpf/src/libbpf.a: $(LIBBPF_SOURCES)
	@echo ; echo " libbpf"
	$(QUIET_CC)$(MAKE) -C libbpf/src CFLAGS="$(LIBBPF_CFLAGS)" $P
	$(QUIET_INSTALL)$(MAKE) -C libbpf/src DESTDIR=$(INSTDIR) install_headers $P

libbpf_clean:
	$(Q)$(MAKE) -C libbpf/src clean $P
	$(Q)$(RM) -r $(LIBBPF_INSTALL)
else
# System libbpf in use: nothing to clean for the submodule.
libbpf_clean:
	@echo -n
endif

97
lib/common.mk Normal file
View File

@ -0,0 +1,97 @@
# Common Makefile parts for BPF-building with libbpf
# --------------------------------------------------
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
#
# This file should be included from your Makefile like:
#  LIB_DIR = ../lib/
#  include $(LIB_DIR)/common.mk
#
# It is expected that you define the variables:
#  BPF_TARGETS and USER_TARGETS
#  as a space-separated list
#

# Derive .c/.o file lists from the target names.
BPF_C = ${BPF_TARGETS:=.c}
BPF_OBJ = ${BPF_C:.c=.o}
USER_C := ${USER_TARGETS:=.c}
USER_OBJ := ${USER_C:.c=.o}
BPF_OBJ_INSTALL ?= $(BPF_OBJ)

# Expect this is defined by including Makefile, but define if not
LIB_DIR ?= ../lib
LDLIBS ?= $(USER_LIBS)

# get list of objects in util
include $(LIB_DIR)/util/util.mk
include $(LIB_DIR)/defines.mk

# Extend if including Makefile already added some
LIB_OBJS += $(foreach obj,$(UTIL_OBJS),$(LIB_DIR)/util/$(obj))

EXTRA_DEPS +=
EXTRA_USER_DEPS +=

# Detect submodule libbpf source file changes
ifeq ($(SYSTEM_LIBBPF),n)
 LIBBPF_SOURCES := $(wildcard $(LIBBPF_DIR)/src/*.[ch])
endif

# The BPF tracing header (/usr/include/bpf/bpf_tracing.h) need to know
# CPU architecture due to PT_REGS_PARM resolution of ASM call convention
#
ARCH := $(shell uname -m | sed 's/x86_64/x86/' | sed 's/aarch64/arm64/' | sed 's/ppc64le/powerpc/' | sed 's/mips.*/mips/')
BPF_CFLAGS += -D__TARGET_ARCH_$(ARCH)

# BPF-prog kern and userspace shares struct via header file:
KERN_USER_H ?= $(wildcard common_kern_user.h)

CFLAGS += -I$(INCLUDE_DIR) -I$(HEADER_DIR) -I$(LIB_DIR)/util $(EXTRA_CFLAGS)
BPF_CFLAGS += -I$(INCLUDE_DIR) -I$(HEADER_DIR) $(EXTRA_CFLAGS)

BPF_HEADERS := $(wildcard $(HEADER_DIR)/*/*.h) $(wildcard $(INCLUDE_DIR)/*/*.h)

# `all` is a command target, not a file -- declare it phony like clean/test.
.PHONY: all
all: $(USER_TARGETS) $(BPF_OBJ) $(EXTRA_TARGETS)

.PHONY: clean
clean::
	$(Q)rm -f $(USER_TARGETS) $(BPF_OBJ) $(USER_OBJ) $(USER_GEN) *.ll

# Rebuild the bundled libbpf when its sources change.
$(OBJECT_LIBBPF): $(LIBBPF_SOURCES)
	$(Q)$(MAKE) -C $(LIB_DIR) libbpf

$(CONFIGMK):
	$(Q)$(MAKE) -C $(LIB_DIR)/.. config.mk

# Create expansions for dependencies
LIB_H := ${LIB_OBJS:.o=.h}

# Detect if any of common obj changed and create dependency on .h-files
$(LIB_OBJS): %.o: %.c %.h $(LIB_H)
	$(Q)$(MAKE) -C $(dir $@) $(notdir $@)

$(USER_TARGETS): %: %.c  $(OBJECT_LIBBPF) $(OBJECT_LIBXDP) $(LIBMK) $(LIB_OBJS) $(KERN_USER_H) $(EXTRA_DEPS) $(EXTRA_USER_DEPS)
	$(QUIET_CC)$(CC) -Wall $(CFLAGS) $(LDFLAGS) -o $@ $(LIB_OBJS) \
	 $< $(LDLIBS)

# Two-stage BPF build: clang emits LLVM IR, llc lowers it to a BPF object.
$(BPF_OBJ): %.o: %.c $(KERN_USER_H) $(EXTRA_DEPS) $(BPF_HEADERS) $(LIBMK)
	$(QUIET_CLANG)$(CLANG) -S \
	    -target bpf \
	    -D __BPF_TRACING__ \
	    $(BPF_CFLAGS) \
	    -Wall \
	    -Wno-unused-value \
	    -Wno-pointer-sign \
	    -Wno-compare-distinct-pointer-types \
	    -O2 -emit-llvm -c -g -o ${@:.o=.ll} $<
	$(QUIET_LLC)$(LLC) -march=bpf -filetype=obj -o $@ ${@:.o=.ll}

.PHONY: test
ifeq ($(TEST_FILE),)
test:
	@echo "    No tests defined"
else
test: all
	$(Q)$(TEST_DIR)/test_runner.sh $(TEST_FILE)
endif

36
lib/defines.mk Normal file
View File

@ -0,0 +1,36 @@
# Shared variable definitions for the build; included by lib/Makefile and
# common.mk.  All ?= assignments are user/environment overridable.
CFLAGS ?= -O2 -g
BPF_CFLAGS ?= -Wno-visibility

# Generated by the configure step in the repository root.
include $(LIB_DIR)/../config.mk

# Standard installation directories (override with PREFIX=... etc.)
PREFIX?=/usr/local
LIBDIR?=$(PREFIX)/lib
SBINDIR?=$(PREFIX)/sbin
HDRDIR?=$(PREFIX)/include/xdp
DATADIR?=$(PREFIX)/share
MANDIR?=$(DATADIR)/man
BPF_DIR_MNT ?=/sys/fs/bpf
BPF_OBJECT_DIR ?=$(LIBDIR)/bpf
MAX_DISPATCHER_ACTIONS ?=10

# headers/ dir contains include header files needed to compile BPF programs
HEADER_DIR = $(LIB_DIR)/../headers
# include/ dir contains the projects own include header files
INCLUDE_DIR = $(LIB_DIR)/../include
TEST_DIR = $(LIB_DIR)/testing
LIBBPF_DIR := $(LIB_DIR)/libbpf

# Compile the bpffs mount point and object path into both user and BPF code.
DEFINES := -DBPF_DIR_MNT=\"$(BPF_DIR_MNT)\" -DBPF_OBJECT_PATH=\"$(BPF_OBJECT_DIR)\"

# Debug builds are the default; set PRODUCTION=1 to drop -DDEBUG.
ifneq ($(PRODUCTION),1)
DEFINES += -DDEBUG
endif

# Feature-detection results accumulate here (empty for now).
HAVE_FEATURES :=

CFLAGS += $(DEFINES)
BPF_CFLAGS += $(DEFINES)

CONFIGMK := $(LIB_DIR)/../config.mk
# Any change to these makefiles forces a rebuild of targets in common.mk.
LIBMK := Makefile $(CONFIGMK) $(LIB_DIR)/defines.mk $(LIB_DIR)/common.mk $(LIB_DIR)/util/util.mk

1
lib/libbpf Submodule

Submodule lib/libbpf added at 1d6106cf45

2
lib/util/util.mk Normal file
View File

@ -0,0 +1,2 @@
# list of objects in this directory
# (empty for now; common.mk links $(LIB_DIR)/util/<obj> for each entry
# into every user-space target)
UTIL_OBJS :=