From d2ef9f2f6d16d34d7eee74cb8efd269341fec5a1 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 6 May 2019 16:54:17 +0800 Subject: [PATCH] core-linux: support layerscape This is an integrated patch of core-linux for layerscape Signed-off-by: Aaron Lu Signed-off-by: Abhijit Ayarekar Signed-off-by: Amrita Kumari Signed-off-by: Andrew Morton Signed-off-by: Ashish Kumar Signed-off-by: Biwen Li Signed-off-by: Camelia Groza Signed-off-by: Christoph Hellwig Signed-off-by: David Ahern Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman Signed-off-by: Guanhua Gao Signed-off-by: Ioana Ciornei Signed-off-by: Jiri Pirko Signed-off-by: Joel Fernandes Signed-off-by: Laurentiu Tudor Signed-off-by: Linus Torvalds Signed-off-by: Li Yang Signed-off-by: Lukas Wunner Signed-off-by: Madalin Bucur Signed-off-by: Mark Brown Signed-off-by: Nikhil Badola Signed-off-by: Nipun Gupta Signed-off-by: Pankaj Bansal Signed-off-by: pascal paillet Signed-off-by: Rafael J. Wysocki Signed-off-by: Ramneek Mehresh Signed-off-by: Robin Murphy Signed-off-by: Suresh Gupta Signed-off-by: Vivek Gautam Signed-off-by: Yangbo Lu --- drivers/base/core.c | 122 ++++++++++++++++++++++++++---- drivers/base/dma-mapping.c | 7 ++ drivers/gpu/ipu-v3/ipu-pre.c | 3 +- drivers/gpu/ipu-v3/ipu-prg.c | 3 +- drivers/iommu/dma-iommu.c | 3 + drivers/mux/Kconfig | 12 +-- drivers/mux/mmio.c | 6 +- drivers/of/device.c | 14 +++- drivers/soc/imx/gpc.c | 2 +- include/linux/device.h | 20 +++-- include/linux/fsl_devices.h | 2 + include/linux/netdevice.h | 10 ++- include/linux/skbuff.h | 2 + lib/dma-noop.c | 19 +++++ mm/page_alloc.c | 10 ++- net/core/dev.c | 81 ++++++++++++-------- net/core/skbuff.c | 29 ++++++- samples/bpf/Makefile | 12 ++- samples/bpf/map_perf_test_kern.c | 2 +- samples/bpf/map_perf_test_user.c | 2 +- tools/testing/selftests/bpf/bpf_helpers.h | 56 ++++++++++++-- 21 files changed, 337 insertions(+), 80 deletions(-) --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -177,10 +177,10 @@ static int device_reorder_to_tail(struct * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be * ignored. * - * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically - * when the consumer device driver unbinds from it. The combination of both - * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL - * to be returned. + * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed + * automatically when the consumer device driver unbinds from it. + * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS + * set is invalid and will cause NULL to be returned. 
* * A side effect of the link creation is re-ordering of dpm_list and the * devices_kset list by moving the consumer device and all devices depending @@ -198,7 +198,8 @@ struct device_link *device_link_add(stru bool rpm_put_supplier = false; if (!consumer || !supplier || - ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE))) + ((flags & DL_FLAG_STATELESS) && + (flags & DL_FLAG_AUTOREMOVE_CONSUMER))) return NULL; if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) { @@ -224,8 +225,10 @@ struct device_link *device_link_add(stru } list_for_each_entry(link, &supplier->links.consumers, s_node) - if (link->consumer == consumer) + if (link->consumer == consumer) { + kref_get(&link->kref); goto out; + } link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) @@ -252,6 +255,7 @@ struct device_link *device_link_add(stru link->consumer = consumer; INIT_LIST_HEAD(&link->c_node); link->flags = flags; + kref_init(&link->kref); /* Determine the initial link state. */ if (flags & DL_FLAG_STATELESS) { @@ -326,8 +330,10 @@ static void __device_link_free_srcu(stru device_link_free(container_of(rhead, struct device_link, rcu_head)); } -static void __device_link_del(struct device_link *link) +static void __device_link_del(struct kref *kref) { + struct device_link *link = container_of(kref, struct device_link, kref); + dev_info(link->consumer, "Dropping the link to %s\n", dev_name(link->supplier)); @@ -339,8 +345,10 @@ static void __device_link_del(struct dev call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu); } #else /* !CONFIG_SRCU */ -static void __device_link_del(struct device_link *link) +static void __device_link_del(struct kref *kref) { + struct device_link *link = container_of(kref, struct device_link, kref); + dev_info(link->consumer, "Dropping the link to %s\n", dev_name(link->supplier)); @@ -358,18 +366,50 @@ static void __device_link_del(struct dev * @link: Device link to delete. * * The caller must ensure proper synchronization of this function with runtime - * PM. + * PM. If the link was added multiple times, it needs to be deleted as often. + * Care is required for hotplugged devices: Their links are purged on removal + * and calling device_link_del() is then no longer allowed. */ void device_link_del(struct device_link *link) { device_links_write_lock(); device_pm_lock(); - __device_link_del(link); + kref_put(&link->kref, __device_link_del); device_pm_unlock(); device_links_write_unlock(); } EXPORT_SYMBOL_GPL(device_link_del); +/** + * device_link_remove - remove a link between two devices. + * @consumer: Consumer end of the link. + * @supplier: Supplier end of the link. + * + * The caller must ensure proper synchronization of this function with runtime + * PM. 
+ */ +void device_link_remove(void *consumer, struct device *supplier) +{ + struct device_link *link; + + if (WARN_ON(consumer == supplier)) + return; + + device_links_write_lock(); + device_pm_lock(); + + list_for_each_entry(link, &supplier->links.consumers, s_node) { + if (link->consumer == consumer) { + kref_put(&link->kref, __device_link_del); + break; + } + } + + device_pm_unlock(); + device_links_write_unlock(); +} +EXPORT_SYMBOL_GPL(device_link_remove); + static void device_links_missing_supplier(struct device *dev) { struct device_link *link; @@ -477,8 +517,8 @@ static void __device_links_no_driver(str if (link->flags & DL_FLAG_STATELESS) continue; - if (link->flags & DL_FLAG_AUTOREMOVE) - __device_link_del(link); + if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) + kref_put(&link->kref, __device_link_del); else if (link->status != DL_STATE_SUPPLIER_UNBIND) WRITE_ONCE(link->status, DL_STATE_AVAILABLE); } @@ -513,8 +553,18 @@ void device_links_driver_cleanup(struct if (link->flags & DL_FLAG_STATELESS) continue; - WARN_ON(link->flags & DL_FLAG_AUTOREMOVE); + WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND); + + /* + * autoremove the links between this @dev and its consumer + * devices that are not active, i.e. where the link state + * has moved to DL_STATE_SUPPLIER_UNBIND. + */ + if (link->status == DL_STATE_SUPPLIER_UNBIND && + link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + kref_put(&link->kref, __device_link_del); + WRITE_ONCE(link->status, DL_STATE_DORMANT); } @@ -631,13 +681,13 @@ static void device_links_purge(struct de list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { WARN_ON(link->status == DL_STATE_ACTIVE); - __device_link_del(link); + __device_link_del(&link->kref); } list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) { WARN_ON(link->status != DL_STATE_DORMANT && link->status != DL_STATE_NONE); - __device_link_del(link); + __device_link_del(&link->kref); } device_links_write_unlock(); @@ -1059,6 +1109,34 @@ static ssize_t online_store(struct devic } static DEVICE_ATTR_RW(online); +static ssize_t suppliers_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct device_link *link; + size_t count = 0; + + list_for_each_entry(link, &dev->links.suppliers, c_node) + count += scnprintf(buf + count, PAGE_SIZE - count, "%s\n", + dev_name(link->supplier)); + + return count; +} +static DEVICE_ATTR_RO(suppliers); + +static ssize_t consumers_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct device_link *link; + size_t count = 0; + + list_for_each_entry(link, &dev->links.consumers, s_node) + count += scnprintf(buf + count, PAGE_SIZE - count, "%s\n", + dev_name(link->consumer)); + + return count; +} +static DEVICE_ATTR_RO(consumers); + int device_add_groups(struct device *dev, const struct attribute_group **groups) { return sysfs_create_groups(&dev->kobj, groups); @@ -1230,8 +1308,20 @@ static int device_add_attrs(struct devic goto err_remove_dev_groups; } + error = device_create_file(dev, &dev_attr_suppliers); + if (error) + goto err_remove_online; + + error = device_create_file(dev, &dev_attr_consumers); + if (error) + goto err_remove_suppliers; + return 0; + err_remove_suppliers: + device_remove_file(dev, &dev_attr_suppliers); + err_remove_online: + device_remove_file(dev, &dev_attr_online); err_remove_dev_groups: device_remove_groups(dev, dev->groups); err_remove_type_groups: @@ -1249,6 +1339,8 @@ static void 
device_remove_attrs(struct d struct class *class = dev->class; const struct device_type *type = dev->type; + device_remove_file(dev, &dev_attr_consumers); + device_remove_file(dev, &dev_attr_suppliers); device_remove_file(dev, &dev_attr_online); device_remove_groups(dev, dev->groups); --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -335,6 +335,7 @@ void dma_common_free_remap(void *cpu_add * Common configuration to enable DMA API use for a device */ #include +#include int dma_configure(struct device *dev) { @@ -350,6 +351,12 @@ int dma_configure(struct device *dev) dma_dev = dma_dev->parent; } + if (dev_is_fsl_mc(dev)) { + dma_dev = dev; + while (dev_is_fsl_mc(dma_dev)) + dma_dev = dma_dev->parent; + } + if (dma_dev->of_node) { ret = of_dma_configure(dev, dma_dev->of_node); } else if (has_acpi_companion(dma_dev)) { --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c @@ -125,7 +125,8 @@ ipu_pre_lookup_by_phandle(struct device list_for_each_entry(pre, &ipu_pre_list, list) { if (pre_node == pre->dev->of_node) { mutex_unlock(&ipu_pre_list_mutex); - device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE); + device_link_add(dev, pre->dev, + DL_FLAG_AUTOREMOVE_CONSUMER); of_node_put(pre_node); return pre; } --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c @@ -99,7 +99,8 @@ ipu_prg_lookup_by_phandle(struct device list_for_each_entry(prg, &ipu_prg_list, list) { if (prg_node == prg->dev->of_node) { mutex_unlock(&ipu_prg_list_mutex); - device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE); + device_link_add(dev, prg->dev, + DL_FLAG_AUTOREMOVE_CONSUMER); prg->id = ipu_id; of_node_put(prg_node); return prg; --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -381,6 +381,9 @@ static dma_addr_t iommu_dma_alloc_iova(s if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1))) iova_len = roundup_pow_of_two(iova_len); + if (dev->bus_dma_mask) + dma_limit &= dev->bus_dma_mask; + if (domain->geometry.force_aperture) dma_limit = min(dma_limit, domain->geometry.aperture_end); --- a/drivers/mux/Kconfig +++ b/drivers/mux/Kconfig @@ -35,14 +35,14 @@ config MUX_GPIO be called mux-gpio. config MUX_MMIO - tristate "MMIO register bitfield-controlled Multiplexer" - depends on (OF && MFD_SYSCON) || COMPILE_TEST + tristate "MMIO/Regmap register bitfield-controlled Multiplexer" + depends on OF || COMPILE_TEST help - MMIO register bitfield-controlled Multiplexer controller. + MMIO/Regmap register bitfield-controlled Multiplexer controller. - The driver builds multiplexer controllers for bitfields in a syscon - register. For N bit wide bitfields, there will be 2^N possible - multiplexer states. + The driver builds multiplexer controllers for bitfields in either + a syscon register or a driver regmap register. For N bit wide + bitfields, there will be 2^N possible multiplexer states. To compile the driver as a module, choose M here: the module will be called mux-mmio. 
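For reference, the new "reg-mux" case described above expects the mux child's parent device to have registered a regmap that the mux driver can then retrieve with dev_get_regmap(dev->parent, NULL), as in the mmio.c hunk below. A minimal parent sketch, assuming a hypothetical MMIO parent driver (driver name and register layout are illustrative and not taken from this patch):

/*
 * Hypothetical parent of a "reg-mux" child node: it registers a
 * devres-managed MMIO regmap so that the mux-mmio driver's
 * dev_get_regmap(dev->parent, NULL) lookup can find it, then
 * populates its child nodes as platform devices.
 */
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static const struct regmap_config demo_mux_parent_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int demo_mux_parent_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct regmap *map;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* devres-managed regmaps are what dev_get_regmap() searches for */
	map = devm_regmap_init_mmio(&pdev->dev, base,
				    &demo_mux_parent_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Create platform devices for the "reg-mux" (and other) children */
	return devm_of_platform_populate(&pdev->dev);
}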
--- a/drivers/mux/mmio.c +++ b/drivers/mux/mmio.c @@ -31,6 +31,7 @@ static const struct mux_control_ops mux_ static const struct of_device_id mux_mmio_dt_ids[] = { { .compatible = "mmio-mux", }, + { .compatible = "reg-mux", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mux_mmio_dt_ids); @@ -46,7 +47,10 @@ static int mux_mmio_probe(struct platfor int ret; int i; - regmap = syscon_node_to_regmap(np->parent); + if (of_device_is_compatible(np, "mmio-mux")) + regmap = syscon_node_to_regmap(np->parent); + else + regmap = dev_get_regmap(dev->parent, NULL) ?: ERR_PTR(-ENODEV); if (IS_ERR(regmap)) { ret = PTR_ERR(regmap); dev_err(dev, "failed to get regmap: %d\n", ret); --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -15,6 +15,9 @@ #include #include "of_private.h" +#ifdef CONFIG_FSL_MC_BUS +#include +#endif /** * of_match_device - Tell if a struct device matches an of_device_id list @@ -105,6 +108,9 @@ int of_dma_configure(struct device *dev, #ifdef CONFIG_ARM_AMBA dev->bus != &amba_bustype && #endif +#ifdef CONFIG_FSL_MC_BUS + dev->bus != &fsl_mc_bus_type && +#endif dev->bus != &platform_bus_type) return ret == -ENODEV ? 0 : ret; @@ -152,10 +158,16 @@ int of_dma_configure(struct device *dev, * set by the driver. */ mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1); + dev->bus_dma_mask = mask; dev->coherent_dma_mask &= mask; *dev->dma_mask &= mask; - coherent = of_dma_is_coherent(np); +#ifdef CONFIG_FSL_MC_BUS + if (dev_is_fsl_mc(dev)) + coherent = fsl_mc_is_dev_coherent(dev); + else +#endif + coherent = of_dma_is_coherent(np); dev_dbg(dev, "device is%sdma coherent\n", coherent ? " " : " not "); --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c @@ -210,7 +210,7 @@ static int imx_pgc_power_domain_probe(st goto genpd_err; } - device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE); + device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE_CONSUMER); return 0; --- a/include/linux/device.h +++ b/include/linux/device.h @@ -55,6 +55,8 @@ struct bus_attribute { struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) #define BUS_ATTR_RO(_name) \ struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) +#define BUS_ATTR_WO(_name) \ + struct bus_attribute bus_attr_##_name = __ATTR_WO(_name) extern int __must_check bus_create_file(struct bus_type *, struct bus_attribute *); @@ -751,14 +753,16 @@ enum device_link_state { * Device link flags. * * STATELESS: The core won't track the presence of supplier/consumer drivers. - * AUTOREMOVE: Remove this link automatically on consumer driver unbind. + * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind. * PM_RUNTIME: If set, the runtime PM framework will use this link. * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. + * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind. */ -#define DL_FLAG_STATELESS BIT(0) -#define DL_FLAG_AUTOREMOVE BIT(1) -#define DL_FLAG_PM_RUNTIME BIT(2) -#define DL_FLAG_RPM_ACTIVE BIT(3) +#define DL_FLAG_STATELESS BIT(0) +#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1) +#define DL_FLAG_PM_RUNTIME BIT(2) +#define DL_FLAG_RPM_ACTIVE BIT(3) +#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4) /** * struct device_link - Device link representation. @@ -769,6 +773,7 @@ enum device_link_state { * @status: The state of the link (with respect to the presence of drivers). * @flags: Link flags. * @rpm_active: Whether or not the consumer device is runtime-PM-active. + * @kref: Count repeated addition of the same link. 
* @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. */ struct device_link { @@ -779,6 +784,7 @@ struct device_link { enum device_link_state status; u32 flags; bool rpm_active; + struct kref kref; #ifdef CONFIG_SRCU struct rcu_head rcu_head; #endif @@ -851,6 +857,8 @@ struct dev_links_info { * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all * hardware supports 64-bit addresses for consistent allocations * such descriptors. + * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA + * limit than the device itself supports. * @dma_pfn_offset: offset of DMA memory range relatively of RAM * @dma_parms: A low level driver may set these to teach IOMMU code about * segment limitations. @@ -930,6 +938,7 @@ struct device { not all hardware supports 64 bit addresses for consistent allocations such descriptors. */ + u64 bus_dma_mask; /* upstream dma_mask constraint */ unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; @@ -1268,6 +1277,7 @@ extern const char *dev_driver_string(con struct device_link *device_link_add(struct device *consumer, struct device *supplier, u32 flags); void device_link_del(struct device_link *link); +void device_link_remove(void *consumer, struct device *supplier); #ifdef CONFIG_PRINTK --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -99,7 +99,9 @@ struct fsl_usb2_platform_data { unsigned suspended:1; unsigned already_suspended:1; unsigned has_fsl_erratum_a007792:1; + unsigned has_fsl_erratum_14:1; unsigned has_fsl_erratum_a005275:1; + unsigned has_fsl_erratum_a006918:1; unsigned has_fsl_erratum_a005697:1; unsigned check_phy_clk_valid:1; --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2350,7 +2350,8 @@ int register_netdevice_notifier(struct n int unregister_netdevice_notifier(struct notifier_block *nb); struct netdev_notifier_info { - struct net_device *dev; + struct net_device *dev; + struct netlink_ext_ack *extack; }; struct netdev_notifier_info_ext { @@ -2382,6 +2383,7 @@ static inline void netdev_notifier_info_ struct net_device *dev) { info->dev = dev; + info->extack = NULL; } static inline struct net_device * @@ -2390,6 +2392,12 @@ netdev_notifier_info_to_dev(const struct return info->dev; } +static inline struct netlink_ext_ack * +netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) +{ + return info->extack; +} + int call_netdevice_notifiers(unsigned long val, struct net_device *dev); --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -964,6 +964,7 @@ void kfree_skb_list(struct sk_buff *segs void skb_tx_error(struct sk_buff *skb); void consume_skb(struct sk_buff *skb); void __consume_stateless_skb(struct sk_buff *skb); +void skb_recycle(struct sk_buff *skb); void __kfree_skb(struct sk_buff *skb); extern struct kmem_cache *skbuff_head_cache; @@ -3344,6 +3345,7 @@ static inline void skb_free_datagram_loc } int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); +void copy_skb_header(struct sk_buff *new, const struct sk_buff *old); int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, __wsum csum); --- a/lib/dma-noop.c +++ b/lib/dma-noop.c @@ -58,11 +58,30 @@ static int dma_noop_map_sg(struct device return nents; } +static int dma_noop_supported(struct device *dev, u64 mask) +{ +#ifdef CONFIG_ZONE_DMA 
+ if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) + return 0; +#else + /* + * Because 32-bit DMA masks are so common we expect every architecture + * to be able to satisfy them - either by not supporting more physical + * memory, or by providing a ZONE_DMA32. If neither is the case, the + * architecture needs to use an IOMMU instead of the direct mapping. + */ + if (dev->bus_dma_mask && mask > dev->bus_dma_mask) + return 0; +#endif + return 1; +} + const struct dma_map_ops dma_noop_ops = { .alloc = dma_noop_alloc, .free = dma_noop_free, .map_page = dma_noop_map_page, .map_sg = dma_noop_map_sg, + .dma_supported = dma_noop_supported, }; EXPORT_SYMBOL(dma_noop_ops); --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4373,8 +4373,14 @@ void page_frag_free(void *addr) { struct page *page = virt_to_head_page(addr); - if (unlikely(put_page_testzero(page))) - __free_pages_ok(page, compound_order(page)); + if (unlikely(put_page_testzero(page))) { + unsigned int order = compound_order(page); + + if (order == 0) /* Via pcp? */ + free_hot_cold_page(page, false); + else + __free_pages_ok(page, order); + } } EXPORT_SYMBOL(page_frag_free); --- a/net/core/dev.c +++ b/net/core/dev.c @@ -163,7 +163,6 @@ static struct list_head offload_base __r static int netif_rx_internal(struct sk_buff *skb); static int call_netdevice_notifiers_info(unsigned long val, - struct net_device *dev, struct netdev_notifier_info *info); static struct napi_struct *napi_by_id(unsigned int napi_id); @@ -1308,10 +1307,11 @@ EXPORT_SYMBOL(netdev_features_change); void netdev_state_change(struct net_device *dev) { if (dev->flags & IFF_UP) { - struct netdev_notifier_change_info change_info; + struct netdev_notifier_change_info change_info = { + .info.dev = dev, + }; - change_info.flags_changed = 0; - call_netdevice_notifiers_info(NETDEV_CHANGE, dev, + call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); } @@ -1532,9 +1532,10 @@ EXPORT_SYMBOL(dev_disable_lro); static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, struct net_device *dev) { - struct netdev_notifier_info info; + struct netdev_notifier_info info = { + .dev = dev, + }; - netdev_notifier_info_init(&info, dev); return nb->notifier_call(nb, val, &info); } @@ -1659,11 +1660,9 @@ EXPORT_SYMBOL(unregister_netdevice_notif */ static int call_netdevice_notifiers_info(unsigned long val, - struct net_device *dev, struct netdev_notifier_info *info) { ASSERT_RTNL(); - netdev_notifier_info_init(info, dev); return raw_notifier_call_chain(&netdev_chain, val, info); } @@ -1678,9 +1677,11 @@ static int call_netdevice_notifiers_info int call_netdevice_notifiers(unsigned long val, struct net_device *dev) { - struct netdev_notifier_info info; + struct netdev_notifier_info info = { + .dev = dev, + }; - return call_netdevice_notifiers_info(val, dev, &info); + return call_netdevice_notifiers_info(val, &info); } EXPORT_SYMBOL(call_netdevice_notifiers); @@ -1703,7 +1704,7 @@ static int call_netdevice_notifiers_mtu( BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); - return call_netdevice_notifiers_info(val, dev, &info.info); + return call_netdevice_notifiers_info(val, &info.info); } #ifdef CONFIG_NET_INGRESS @@ -6346,7 +6347,15 @@ static int __netdev_upper_dev_link(struc struct net_device *upper_dev, bool master, void *upper_priv, void *upper_info) { - struct netdev_notifier_changeupper_info changeupper_info; + struct netdev_notifier_changeupper_info changeupper_info = { + .info = { + .dev = dev, + }, +
.upper_dev = upper_dev, + .master = master, + .linking = true, + .upper_info = upper_info, + }; int ret = 0; ASSERT_RTNL(); @@ -6364,12 +6373,7 @@ static int __netdev_upper_dev_link(struc if (master && netdev_master_upper_dev_get(dev)) return -EBUSY; - changeupper_info.upper_dev = upper_dev; - changeupper_info.master = master; - changeupper_info.linking = true; - changeupper_info.upper_info = upper_info; - - ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev, + ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, &changeupper_info.info); ret = notifier_to_errno(ret); if (ret) @@ -6381,7 +6385,7 @@ static int __netdev_upper_dev_link(struc return ret; netdev_update_addr_mask(dev); - ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev, + ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, &changeupper_info.info); ret = notifier_to_errno(ret); if (ret) @@ -6445,21 +6449,25 @@ EXPORT_SYMBOL(netdev_master_upper_dev_li void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev) { - struct netdev_notifier_changeupper_info changeupper_info; + struct netdev_notifier_changeupper_info changeupper_info = { + .info = { + .dev = dev, + }, + .upper_dev = upper_dev, + .linking = false, + }; ASSERT_RTNL(); - changeupper_info.upper_dev = upper_dev; changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev; - changeupper_info.linking = false; - call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev, + call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, &changeupper_info.info); __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev); netdev_update_addr_mask(dev); - call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev, + call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, &changeupper_info.info); } EXPORT_SYMBOL(netdev_upper_dev_unlink); @@ -6475,11 +6483,13 @@ EXPORT_SYMBOL(netdev_upper_dev_unlink); void netdev_bonding_info_change(struct net_device *dev, struct netdev_bonding_info *bonding_info) { - struct netdev_notifier_bonding_info info; + struct netdev_notifier_bonding_info info = { + .info.dev = dev, + }; memcpy(&info.bonding_info, bonding_info, sizeof(struct netdev_bonding_info)); - call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev, + call_netdevice_notifiers_info(NETDEV_BONDING_INFO, &info.info); } EXPORT_SYMBOL(netdev_bonding_info_change); @@ -6605,11 +6615,13 @@ EXPORT_SYMBOL(dev_get_nest_level); void netdev_lower_state_changed(struct net_device *lower_dev, void *lower_state_info) { - struct netdev_notifier_changelowerstate_info changelowerstate_info; + struct netdev_notifier_changelowerstate_info changelowerstate_info = { + .info.dev = lower_dev, + }; ASSERT_RTNL(); changelowerstate_info.lower_state_info = lower_state_info; - call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev, + call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, &changelowerstate_info.info); } EXPORT_SYMBOL(netdev_lower_state_changed); @@ -6900,11 +6912,14 @@ void __dev_notify_flags(struct net_devic if (dev->flags & IFF_UP && (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) { - struct netdev_notifier_change_info change_info; + struct netdev_notifier_change_info change_info = { + .info = { + .dev = dev, + }, + .flags_changed = changes, + }; - change_info.flags_changed = changes; - call_netdevice_notifiers_info(NETDEV_CHANGE, dev, - &change_info.info); + call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info); } } --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -812,6 +812,32 @@ void napi_consume_skb(struct sk_buff 
*sk } EXPORT_SYMBOL(napi_consume_skb); +/** + * skb_recycle - clean up an skb for reuse + * @skb: buffer + * + * Recycles the skb to be reused as a receive buffer. This + * function does any necessary reference count dropping, and + * cleans up the skbuff as if it just came from __alloc_skb(). + */ +void skb_recycle(struct sk_buff *skb) +{ + struct skb_shared_info *shinfo; + u8 head_frag = skb->head_frag; + + skb_release_head_state(skb); + + shinfo = skb_shinfo(skb); + memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); + atomic_set(&shinfo->dataref, 1); + + memset(skb, 0, offsetof(struct sk_buff, tail)); + skb->data = skb->head + NET_SKB_PAD; + skb->head_frag = head_frag; + skb_reset_tail_pointer(skb); +} +EXPORT_SYMBOL(skb_recycle); + /* Make sure a field is enclosed inside headers_start/headers_end section */ #define CHECK_SKB_FIELD(field) \ BUILD_BUG_ON(offsetof(struct sk_buff, field) < \ @@ -1331,7 +1357,7 @@ static void skb_headers_offset_update(st skb->inner_mac_header += off; } -static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) +void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) { __copy_skb_header(new, old); @@ -1339,6 +1365,7 @@ static void copy_skb_header(struct sk_bu skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; } +EXPORT_SYMBOL(copy_skb_header); static inline int skb_alloc_rx_flag(const struct sk_buff *skb) { --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -178,6 +178,12 @@ HOSTLOADLIBES_syscall_tp += -lelf LLC ?= llc CLANG ?= clang +# Detect that we're cross compiling and use the cross compiler +ifdef CROSS_COMPILE +HOSTCC = $(CROSS_COMPILE)gcc +CLANG_ARCH_ARGS = -target $(ARCH) +endif + # Trick to allow make to be run from this directory all: $(LIBBPF) $(MAKE) -C ../../ $(CURDIR)/ @@ -228,9 +234,9 @@ $(obj)/tracex5_kern.o: $(obj)/syscall_nr $(obj)/%.o: $(src)/%.c $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \ -I$(srctree)/tools/testing/selftests/bpf/ \ - -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ - -Wno-compare-distinct-pointer-types \ + -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \ + -D__TARGET_ARCH_$(ARCH) -Wno-compare-distinct-pointer-types \ -Wno-gnu-variable-sized-type-not-at-end \ -Wno-address-of-packed-member -Wno-tautological-compare \ - -Wno-unknown-warning-option \ + -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ --- a/samples/bpf/map_perf_test_kern.c +++ b/samples/bpf/map_perf_test_kern.c @@ -266,7 +266,7 @@ int stress_hash_map_lookup(struct pt_reg return 0; } -SEC("kprobe/sys_getpgrp") +SEC("kprobe/sys_getppid") int stress_array_map_lookup(struct pt_regs *ctx) { u32 key = 1, i; --- a/samples/bpf/map_perf_test_user.c +++ b/samples/bpf/map_perf_test_user.c @@ -282,7 +282,7 @@ static void test_array_lookup(int cpu) start_time = time_get_ns(); for (i = 0; i < max_cnt; i++) - syscall(__NR_getpgrp, 0); + syscall(__NR_getppid, 0); printf("%d:array_lookup %lld lookups per sec\n", cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time)); } --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -110,7 +110,47 @@ static int (*bpf_skb_under_cgroup)(void static int (*bpf_skb_change_head)(void *, int len, int flags) = (void *) BPF_FUNC_skb_change_head; +/* Scan the ARCH passed in from ARCH env variable (see Makefile) */ +#if defined(__TARGET_ARCH_x86) + #define bpf_target_x86 + 
#define bpf_target_defined +#elif defined(__TARGET_ARCH_s390x) + #define bpf_target_s390x + #define bpf_target_defined +#elif defined(__TARGET_ARCH_arm64) + #define bpf_target_arm64 + #define bpf_target_defined +#elif defined(__TARGET_ARCH_mips) + #define bpf_target_mips + #define bpf_target_defined +#elif defined(__TARGET_ARCH_powerpc) + #define bpf_target_powerpc + #define bpf_target_defined +#elif defined(__TARGET_ARCH_sparc) + #define bpf_target_sparc + #define bpf_target_defined +#else + #undef bpf_target_defined +#endif + +/* Fall back to what the compiler says */ +#ifndef bpf_target_defined #if defined(__x86_64__) + #define bpf_target_x86 +#elif defined(__s390x__) + #define bpf_target_s390x +#elif defined(__aarch64__) + #define bpf_target_arm64 +#elif defined(__mips__) + #define bpf_target_mips +#elif defined(__powerpc__) + #define bpf_target_powerpc +#elif defined(__sparc__) + #define bpf_target_sparc +#endif +#endif + +#if defined(bpf_target_x86) #define PT_REGS_PARM1(x) ((x)->di) #define PT_REGS_PARM2(x) ((x)->si) @@ -123,7 +163,7 @@ static int (*bpf_skb_change_head)(void * #define PT_REGS_SP(x) ((x)->sp) #define PT_REGS_IP(x) ((x)->ip) -#elif defined(__s390x__) +#elif defined(bpf_target_s390x) #define PT_REGS_PARM1(x) ((x)->gprs[2]) #define PT_REGS_PARM2(x) ((x)->gprs[3]) @@ -136,7 +176,7 @@ static int (*bpf_skb_change_head)(void * #define PT_REGS_SP(x) ((x)->gprs[15]) #define PT_REGS_IP(x) ((x)->psw.addr) -#elif defined(__aarch64__) +#elif defined(bpf_target_arm64) #define PT_REGS_PARM1(x) ((x)->regs[0]) #define PT_REGS_PARM2(x) ((x)->regs[1]) @@ -149,7 +189,7 @@ static int (*bpf_skb_change_head)(void * #define PT_REGS_SP(x) ((x)->sp) #define PT_REGS_IP(x) ((x)->pc) -#elif defined(__mips__) +#elif defined(bpf_target_mips) #define PT_REGS_PARM1(x) ((x)->regs[4]) #define PT_REGS_PARM2(x) ((x)->regs[5]) @@ -162,7 +202,7 @@ static int (*bpf_skb_change_head)(void * #define PT_REGS_SP(x) ((x)->regs[29]) #define PT_REGS_IP(x) ((x)->cp0_epc) -#elif defined(__powerpc__) +#elif defined(bpf_target_powerpc) #define PT_REGS_PARM1(x) ((x)->gpr[3]) #define PT_REGS_PARM2(x) ((x)->gpr[4]) @@ -173,7 +213,7 @@ static int (*bpf_skb_change_head)(void * #define PT_REGS_SP(x) ((x)->sp) #define PT_REGS_IP(x) ((x)->nip) -#elif defined(__sparc__) +#elif defined(bpf_target_sparc) #define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0]) #define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1]) @@ -183,6 +223,8 @@ static int (*bpf_skb_change_head)(void * #define PT_REGS_RET(x) ((x)->u_regs[UREG_I7]) #define PT_REGS_RC(x) ((x)->u_regs[UREG_I0]) #define PT_REGS_SP(x) ((x)->u_regs[UREG_FP]) + +/* Should this also be a bpf_target check for the sparc case? */ #if defined(__arch64__) #define PT_REGS_IP(x) ((x)->tpc) #else @@ -191,10 +233,10 @@ static int (*bpf_skb_change_head)(void * #endif -#ifdef __powerpc__ +#ifdef bpf_target_powerpc #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP -#elif defined(__sparc__) +#elif defined(bpf_target_sparc) #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); }) #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP #else
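For reference, the bpf_target_* selection above lets a restricted-C sample use the PT_REGS_*() accessors unchanged whether it is built natively or cross-compiled through the new -D__TARGET_ARCH_$(ARCH) / -target $(ARCH) flags added to the samples/bpf Makefile. A minimal sketch of such a program; the probe point and names are illustrative and not taken from this patch:

/*
 * Illustrative kprobe program only: PT_REGS_PARM1() expands to the
 * per-architecture register chosen by the bpf_target_* logic above.
 */
#include <linux/ptrace.h>
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

SEC("kprobe/sys_write")
int demo_trace_write(struct pt_regs *ctx)
{
	/* First argument of sys_write(), i.e. the file descriptor */
	long fd = PT_REGS_PARM1(ctx);
	char fmt[] = "write fd=%ld\n";

	bpf_trace_printk(fmt, sizeof(fmt), fd);
	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;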