author		Jiri Kosina <jkosina@suse.cz>	2013-12-19 15:08:03 +0100
committer	Jiri Kosina <jkosina@suse.cz>	2013-12-19 15:08:32 +0100
commit		e23c34bb41da65f354fb7eee04300c56ee48f60c (patch)
tree		549fbe449d55273b81ef104a9755109bf4ae7817 /include/linux
parent		b481c2cb3534c85dca625973b33eba15f9af3e4c (diff)
parent		319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply fixes on top of newer things in tree (efi-stub).

Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/acpi.h107
-rw-r--r--include/linux/acpi_gpio.h31
-rw-r--r--include/linux/aio.h21
-rw-r--r--include/linux/amba/bus.h4
-rw-r--r--include/linux/amba/pl080.h1
-rw-r--r--include/linux/assoc_array.h92
-rw-r--r--include/linux/assoc_array_priv.h182
-rw-r--r--include/linux/ata.h7
-rw-r--r--include/linux/atmel_serial.h1
-rw-r--r--include/linux/audit.h15
-rw-r--r--include/linux/backing-dev.h7
-rw-r--r--include/linux/backlight.h4
-rw-r--r--include/linux/balloon_compaction.h25
-rw-r--r--include/linux/bcma/bcma_driver_pci.h1
-rw-r--r--include/linux/binfmts.h8
-rw-r--r--include/linux/bio.h3
-rw-r--r--include/linux/bitops.h11
-rw-r--r--include/linux/blk-mq.h183
-rw-r--r--include/linux/blk_types.h68
-rw-r--r--include/linux/blkdev.h74
-rw-r--r--include/linux/blktrace_api.h4
-rw-r--r--include/linux/capability.h1
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/cgroup.h37
-rw-r--r--include/linux/clk-private.h3
-rw-r--r--include/linux/clk-provider.h32
-rw-r--r--include/linux/clk/mxs.h2
-rw-r--r--include/linux/clk/sunxi.h22
-rw-r--r--include/linux/clockchips.h1
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/cmdline-parser.h45
-rw-r--r--include/linux/compat.h7
-rw-r--r--include/linux/compiler-gcc4.h15
-rw-r--r--include/linux/compiler-intel.h2
-rw-r--r--include/linux/completion.h30
-rw-r--r--include/linux/coredump.h10
-rw-r--r--include/linux/cper.h13
-rw-r--r--include/linux/cpu.h16
-rw-r--r--include/linux/cpu_rmap.h3
-rw-r--r--include/linux/cpufreq.h84
-rw-r--r--include/linux/cpuidle.h8
-rw-r--r--include/linux/cpuset.h4
-rw-r--r--include/linux/crash_dump.h9
-rw-r--r--include/linux/crc-t10dif.h4
-rw-r--r--include/linux/crc32.h40
-rw-r--r--include/linux/dcache.h132
-rw-r--r--include/linux/debugfs.h12
-rw-r--r--include/linux/devfreq.h6
-rw-r--r--include/linux/device-mapper.h12
-rw-r--r--include/linux/device.h78
-rw-r--r--include/linux/dma-contiguous.h62
-rw-r--r--include/linux/dma-mapping.h31
-rw-r--r--include/linux/dma/mmp-pdma.h15
-rw-r--r--include/linux/dmaengine.h128
-rw-r--r--include/linux/dmi.h5
-rw-r--r--include/linux/edac.h2
-rw-r--r--include/linux/efi.h62
-rw-r--r--include/linux/elf.h6
-rw-r--r--include/linux/elfcore.h7
-rw-r--r--include/linux/etherdevice.h35
-rw-r--r--include/linux/eventfd.h3
-rw-r--r--include/linux/export.h4
-rw-r--r--include/linux/extcon.h72
-rw-r--r--include/linux/extcon/extcon-adc-jack.h42
-rw-r--r--include/linux/extcon/extcon-gpio.h20
-rw-r--r--include/linux/fb.h12
-rw-r--r--include/linux/fcdevice.h2
-rw-r--r--include/linux/fddidevice.h7
-rw-r--r--include/linux/filter.h15
-rw-r--r--include/linux/fs.h146
-rw-r--r--include/linux/fs_struct.h11
-rw-r--r--include/linux/fscache-cache.h54
-rw-r--r--include/linux/fscache.h153
-rw-r--r--include/linux/fsl/mxs-dma.h20
-rw-r--r--include/linux/ftrace.h5
-rw-r--r--include/linux/ftrace_event.h41
-rw-r--r--include/linux/genalloc.h6
-rw-r--r--include/linux/genl_magic_func.h53
-rw-r--r--include/linux/gpio.h67
-rw-r--r--include/linux/gpio/consumer.h253
-rw-r--r--include/linux/gpio/driver.h194
-rw-r--r--include/linux/hardirq.h12
-rw-r--r--include/linux/hashtable.h15
-rw-r--r--include/linux/hid-sensor-hub.h23
-rw-r--r--include/linux/hid-sensor-ids.h12
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/hippidevice.h10
-rw-r--r--include/linux/host1x.h284
-rw-r--r--include/linux/huge_mm.h20
-rw-r--r--include/linux/hugetlb.h60
-rw-r--r--include/linux/hwmon-vid.h2
-rw-r--r--include/linux/hwmon.h10
-rw-r--r--include/linux/hyperv.h44
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/i2c/twl.h2
-rw-r--r--include/linux/i8042.h24
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/ieee80211.h24
-rw-r--r--include/linux/if_macvlan.h18
-rw-r--r--include/linux/if_vlan.h101
-rw-r--r--include/linux/iio/buffer.h61
-rw-r--r--include/linux/iio/common/st_sensors.h6
-rw-r--r--include/linux/iio/consumer.h2
-rw-r--r--include/linux/iio/events.h14
-rw-r--r--include/linux/iio/iio.h93
-rw-r--r--include/linux/iio/sysfs.h15
-rw-r--r--include/linux/iio/types.h20
-rw-r--r--include/linux/inetdevice.h28
-rw-r--r--include/linux/init.h7
-rw-r--r--include/linux/init_task.h8
-rw-r--r--include/linux/intel-iommu.h2
-rw-r--r--include/linux/interrupt.h108
-rw-r--r--include/linux/iommu.h18
-rw-r--r--include/linux/ipc_namespace.h8
-rw-r--r--include/linux/ipv6.h77
-rw-r--r--include/linux/irq.h16
-rw-r--r--include/linux/irqchip/arm-gic.h8
-rw-r--r--include/linux/irqchip/mmp.h6
-rw-r--r--include/linux/irqdesc.h3
-rw-r--r--include/linux/irqnr.h19
-rw-r--r--include/linux/irqreturn.h2
-rw-r--r--include/linux/jump_label.h14
-rw-r--r--include/linux/jump_label_ratelimit.h2
-rw-r--r--include/linux/kdb.h1
-rw-r--r--include/linux/kernel-page-flags.h1
-rw-r--r--include/linux/kernel.h16
-rw-r--r--include/linux/kernel_stat.h34
-rw-r--r--include/linux/kexec.h3
-rw-r--r--include/linux/key-type.h6
-rw-r--r--include/linux/key.h52
-rw-r--r--include/linux/kfifo.h47
-rw-r--r--include/linux/kgdb.h1
-rw-r--r--include/linux/kobj_completion.h18
-rw-r--r--include/linux/kobject.h1
-rw-r--r--include/linux/kobject_ns.h2
-rw-r--r--include/linux/kprobes.h34
-rw-r--r--include/linux/kvm_host.h43
-rw-r--r--include/linux/lglock.h10
-rw-r--r--include/linux/list.h79
-rw-r--r--include/linux/list_lru.h131
-rw-r--r--include/linux/llist.h2
-rw-r--r--include/linux/lockdep.h8
-rw-r--r--include/linux/lockref.h16
-rw-r--r--include/linux/lz4.h8
-rw-r--r--include/linux/math64.h13
-rw-r--r--include/linux/mbus.h16
-rw-r--r--include/linux/memblock.h26
-rw-r--r--include/linux/memcontrol.h65
-rw-r--r--include/linux/memory_hotplug.h11
-rw-r--r--include/linux/mempolicy.h17
-rw-r--r--include/linux/mfd/arizona/registers.h2
-rw-r--r--include/linux/mfd/as3722.h423
-rw-r--r--include/linux/mfd/core.h8
-rw-r--r--include/linux/mfd/da9052/da9052.h20
-rw-r--r--include/linux/mfd/da9063/core.h93
-rw-r--r--include/linux/mfd/da9063/pdata.h111
-rw-r--r--include/linux/mfd/da9063/registers.h1028
-rw-r--r--include/linux/mfd/davinci_voicecodec.h3
-rw-r--r--include/linux/mfd/dbx500-prcmu.h70
-rw-r--r--include/linux/mfd/max77693-private.h1
-rw-r--r--include/linux/mfd/max77693.h2
-rw-r--r--include/linux/mfd/mc13xxx.h7
-rw-r--r--include/linux/mfd/mcp.h2
-rw-r--r--include/linux/mfd/palmas.h50
-rw-r--r--include/linux/mfd/rtsx_common.h3
-rw-r--r--include/linux/mfd/rtsx_pci.h119
-rw-r--r--include/linux/mfd/samsung/core.h4
-rw-r--r--include/linux/mfd/samsung/rtc.h11
-rw-r--r--include/linux/mfd/samsung/s2mps11.h6
-rw-r--r--include/linux/mfd/stw481x.h56
-rw-r--r--include/linux/mfd/syscon.h25
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h13
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h30
-rw-r--r--include/linux/mfd/tmio.h1
-rw-r--r--include/linux/mfd/twl6040.h2
-rw-r--r--include/linux/mfd/ucb1x00.h1
-rw-r--r--include/linux/mfd/wm8994/core.h47
-rw-r--r--include/linux/micrel_phy.h8
-rw-r--r--include/linux/migrate.h15
-rw-r--r--include/linux/miscdevice.h2
-rw-r--r--include/linux/mlx4/cmd.h6
-rw-r--r--include/linux/mlx4/device.h25
-rw-r--r--include/linux/mlx5/device.h17
-rw-r--r--include/linux/mlx5/driver.h24
-rw-r--r--include/linux/mm.h304
-rw-r--r--include/linux/mm_inline.h1
-rw-r--r--include/linux/mm_types.h67
-rw-r--r--include/linux/mman.h2
-rw-r--r--include/linux/mmc/card.h7
-rw-r--r--include/linux/mmc/core.h6
-rw-r--r--include/linux/mmc/dw_mmc.h4
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/mmc/sdhci.h1
-rw-r--r--include/linux/mmc/sh_mmcif.h6
-rw-r--r--include/linux/mmc/sh_mobile_sdhi.h2
-rw-r--r--include/linux/mmc/slot-gpio.h3
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/module.h3
-rw-r--r--include/linux/mount.h3
-rw-r--r--include/linux/msg.h6
-rw-r--r--include/linux/msi.h33
-rw-r--r--include/linux/mtd/bbm.h4
-rw-r--r--include/linux/mtd/fsmc.h1
-rw-r--r--include/linux/mtd/map.h4
-rw-r--r--include/linux/mtd/mtd.h11
-rw-r--r--include/linux/mtd/nand.h99
-rw-r--r--include/linux/mutex.h8
-rw-r--r--include/linux/namei.h6
-rw-r--r--include/linux/net.h119
-rw-r--r--include/linux/netdev_features.h6
-rw-r--r--include/linux/netdevice.h551
-rw-r--r--include/linux/netfilter.h24
-rw-r--r--include/linux/netfilter/ipset/ip_set.h167
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h57
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_h323.h14
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h162
-rw-r--r--include/linux/netfilter/nfnetlink.h29
-rw-r--r--include/linux/netfilter/nfnetlink_acct.h6
-rw-r--r--include/linux/netfilter/x_tables.h128
-rw-r--r--include/linux/netfilter_bridge.h4
-rw-r--r--include/linux/netfilter_ipv4.h6
-rw-r--r--include/linux/netfilter_ipv6.h10
-rw-r--r--include/linux/netpoll.h5
-rw-r--r--include/linux/nfs4.h18
-rw-r--r--include/linux/nfs_fs.h27
-rw-r--r--include/linux/nfs_fs_sb.h23
-rw-r--r--include/linux/nfs_xdr.h52
-rw-r--r--include/linux/nvme.h466
-rw-r--r--include/linux/of.h84
-rw-r--r--include/linux/of_address.h39
-rw-r--r--include/linux/of_fdt.h23
-rw-r--r--include/linux/of_gpio.h29
-rw-r--r--include/linux/of_irq.h67
-rw-r--r--include/linux/of_mtd.h21
-rw-r--r--include/linux/of_net.h4
-rw-r--r--include/linux/of_pci.h17
-rw-r--r--include/linux/oom.h5
-rw-r--r--include/linux/opp.h134
-rw-r--r--include/linux/padata.h3
-rw-r--r--include/linux/page-flags-layout.h28
-rw-r--r--include/linux/page-flags.h4
-rw-r--r--include/linux/pci-acpi.h4
-rw-r--r--include/linux/pci.h106
-rw-r--r--include/linux/pci_hotplug.h5
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/pcieport_if.h2
-rw-r--r--include/linux/percpu.h40
-rw-r--r--include/linux/percpu_ida.h81
-rw-r--r--include/linux/perf_event.h29
-rw-r--r--include/linux/phy.h1
-rw-r--r--include/linux/phy/phy.h270
-rw-r--r--include/linux/pid_namespace.h1
-rw-r--r--include/linux/pinctrl/pinctrl.h3
-rw-r--r--include/linux/platform_data/at24.h (renamed from include/linux/i2c/at24.h)2
-rw-r--r--include/linux/platform_data/atmel.h4
-rw-r--r--include/linux/platform_data/bd6107.h19
-rw-r--r--include/linux/platform_data/clk-nomadik.h2
-rw-r--r--include/linux/platform_data/clk-ux500.h3
-rw-r--r--include/linux/platform_data/davinci_asp.h2
-rw-r--r--include/linux/platform_data/dma-rcar-hpbdma.h103
-rw-r--r--include/linux/platform_data/dma-s3c24xx.h46
-rw-r--r--include/linux/platform_data/edma.h10
-rw-r--r--include/linux/platform_data/exynos_thermal.h119
-rw-r--r--include/linux/platform_data/gpio-davinci.h60
-rw-r--r--include/linux/platform_data/gpio-em.h1
-rw-r--r--include/linux/platform_data/gpio_backlight.h21
-rw-r--r--include/linux/platform_data/leds-lp55xx.h17
-rw-r--r--include/linux/platform_data/leds-pca963x.h (renamed from include/linux/platform_data/leds-pca9633.h)25
-rw-r--r--include/linux/platform_data/leds-pca9685.h35
-rw-r--r--include/linux/platform_data/leds-renesas-tpu.h14
-rw-r--r--include/linux/platform_data/lm3630_bl.h57
-rw-r--r--include/linux/platform_data/lm3630a_bl.h65
-rw-r--r--include/linux/platform_data/lp855x.h19
-rw-r--r--include/linux/platform_data/lv5207lp.h19
-rw-r--r--include/linux/platform_data/mipi-csis.h9
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h5
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h18
-rw-r--r--include/linux/platform_data/mtd-nand-pxa3xx.h13
-rw-r--r--include/linux/platform_data/pca953x.h (renamed from include/linux/i2c/pca953x.h)0
-rw-r--r--include/linux/platform_data/pinctrl-adi2.h40
-rw-r--r--include/linux/platform_data/pinctrl-single.h12
-rw-r--r--include/linux/platform_data/usb-ehci-s5p.h21
-rw-r--r--include/linux/platform_data/usb-ohci-exynos.h21
-rw-r--r--include/linux/platform_data/usb-rcar-gen2-phy.h22
-rw-r--r--include/linux/platform_data/zforce_ts.h26
-rw-r--r--include/linux/platform_device.h1
-rw-r--r--include/linux/pm_opp.h139
-rw-r--r--include/linux/power/bq24190_charger.h16
-rw-r--r--include/linux/power/bq24735-charger.h (renamed from include/linux/irqchip/bcm2835.h)26
-rw-r--r--include/linux/power/twl4030_madc_battery.h39
-rw-r--r--include/linux/power_supply.h3
-rw-r--r--include/linux/powercap.h325
-rw-r--r--include/linux/preempt.h112
-rw-r--r--include/linux/preempt_mask.h41
-rw-r--r--include/linux/printk.h16
-rw-r--r--include/linux/pwm_backlight.h5
-rw-r--r--include/linux/quota.h1
-rw-r--r--include/linux/radix-tree.h1
-rw-r--r--include/linux/raid/pq.h1
-rw-r--r--include/linux/ramfs.h2
-rw-r--r--include/linux/random.h15
-rw-r--r--include/linux/rbtree.h24
-rw-r--r--include/linux/rculist.h23
-rw-r--r--include/linux/rcupdate.h24
-rw-r--r--include/linux/rcutiny.h17
-rw-r--r--include/linux/rcutree.h2
-rw-r--r--include/linux/regmap.h53
-rw-r--r--include/linux/regulator/consumer.h79
-rw-r--r--include/linux/regulator/driver.h20
-rw-r--r--include/linux/regulator/machine.h7
-rw-r--r--include/linux/res_counter.h2
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/sched.h202
-rw-r--r--include/linux/sched/sysctl.h3
-rw-r--r--include/linux/sched_clock.h4
-rw-r--r--include/linux/security.h34
-rw-r--r--include/linux/seq_file.h15
-rw-r--r--include/linux/seqlock.h176
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/serial_sci.h2
-rw-r--r--include/linux/sfi.h3
-rw-r--r--include/linux/sh_dma.h55
-rw-r--r--include/linux/shdma-base.h3
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/shrinker.h54
-rw-r--r--include/linux/skbuff.h322
-rw-r--r--include/linux/slab.h265
-rw-r--r--include/linux/slab_def.h110
-rw-r--r--include/linux/slob_def.h31
-rw-r--r--include/linux/slub_def.h112
-rw-r--r--include/linux/smp.h89
-rw-r--r--include/linux/spi/mmc_spi.h19
-rw-r--r--include/linux/spi/rspi.h2
-rw-r--r--include/linux/spi/spi.h61
-rw-r--r--include/linux/srcu.h14
-rw-r--r--include/linux/ssb/ssb_driver_gige.h14
-rw-r--r--include/linux/stop_machine.h1
-rw-r--r--include/linux/sunrpc/auth.h28
-rw-r--r--include/linux/sunrpc/cache.h22
-rw-r--r--include/linux/sunrpc/clnt.h12
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h39
-rw-r--r--include/linux/sunrpc/sched.h3
-rw-r--r--include/linux/sunrpc/svc.h1
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/swap.h54
-rw-r--r--include/linux/swapops.h7
-rw-r--r--include/linux/syscalls.h9
-rw-r--r--include/linux/sysfs.h88
-rw-r--r--include/linux/sysrq.h3
-rw-r--r--include/linux/tc_act/tc_defact.h19
-rw-r--r--include/linux/tegra-cpuidle.h25
-rw-r--r--include/linux/tegra-powergate.h36
-rw-r--r--include/linux/thermal.h18
-rw-r--r--include/linux/thinkpad_acpi.h15
-rw-r--r--include/linux/thread_info.h17
-rw-r--r--include/linux/time-armada-370-xp.h18
-rw-r--r--include/linux/timex.h15
-rw-r--r--include/linux/topology.h6
-rw-r--r--include/linux/tracepoint.h4
-rw-r--r--include/linux/tty.h29
-rw-r--r--include/linux/u64_stats_sync.h7
-rw-r--r--include/linux/uaccess.h8
-rw-r--r--include/linux/uprobes.h15
-rw-r--r--include/linux/usb.h12
-rw-r--r--include/linux/usb/cdc_ncm.h33
-rw-r--r--include/linux/usb/hcd.h16
-rw-r--r--include/linux/usb/intel_mid_otg.h180
-rw-r--r--include/linux/usb/musb.h2
-rw-r--r--include/linux/usb/omap_control_usb.h33
-rw-r--r--include/linux/usb/serial.h2
-rw-r--r--include/linux/usb/usb_phy_gen_xceiv.h5
-rw-r--r--include/linux/usb/usbnet.h1
-rw-r--r--include/linux/usb/wusb-wa.h51
-rw-r--r--include/linux/usb/wusb.h2
-rw-r--r--include/linux/usb_usual.h4
-rw-r--r--include/linux/user_namespace.h10
-rw-r--r--include/linux/vfio.h7
-rw-r--r--include/linux/virtio.h6
-rw-r--r--include/linux/virtio_config.h161
-rw-r--r--include/linux/virtio_ring.h2
-rw-r--r--include/linux/vm_event_item.h7
-rw-r--r--include/linux/vmstat.h4
-rw-r--r--include/linux/wait.h393
-rw-r--r--include/linux/writeback.h4
-rw-r--r--include/linux/xattr.h2
-rw-r--r--include/linux/yam.h2
390 files changed, 10951 insertions, 4499 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index a5db4aeefa36..d9099b15b472 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -44,6 +44,20 @@
#include <acpi/acpi_numa.h>
#include <asm/acpi.h>
+static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
+{
+ return adev ? adev->handle : NULL;
+}
+
+#define ACPI_COMPANION(dev) ((dev)->acpi_node.companion)
+#define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev)
+#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+
+static inline const char *acpi_dev_name(struct acpi_device *adev)
+{
+ return dev_name(&adev->dev);
+}
+
enum acpi_irq_model_id {
ACPI_IRQ_MODEL_PIC = 0,
ACPI_IRQ_MODEL_IOAPIC,
@@ -116,7 +130,7 @@ void acpi_numa_arch_fixup(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int *pcpu);
+int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
int acpi_unmap_lsapic(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
@@ -294,58 +308,52 @@ void __init acpi_nvs_nosave_s3(void);
#endif /* CONFIG_PM_SLEEP */
struct acpi_osc_context {
- char *uuid_str; /* uuid string */
+ char *uuid_str; /* UUID string */
int rev;
- struct acpi_buffer cap; /* arg2/arg3 */
- struct acpi_buffer ret; /* free by caller if success */
+ struct acpi_buffer cap; /* list of DWORD capabilities */
+ struct acpi_buffer ret; /* free by caller if success */
};
-#define OSC_QUERY_TYPE 0
-#define OSC_SUPPORT_TYPE 1
-#define OSC_CONTROL_TYPE 2
-
-/* _OSC DW0 Definition */
-#define OSC_QUERY_ENABLE 1
-#define OSC_REQUEST_ERROR 2
-#define OSC_INVALID_UUID_ERROR 4
-#define OSC_INVALID_REVISION_ERROR 8
-#define OSC_CAPABILITIES_MASK_ERROR 16
-
+acpi_status acpi_str_to_uuid(char *str, u8 *uuid);
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
-/* platform-wide _OSC bits */
-#define OSC_SB_PAD_SUPPORT 1
-#define OSC_SB_PPC_OST_SUPPORT 2
-#define OSC_SB_PR3_SUPPORT 4
-#define OSC_SB_HOTPLUG_OST_SUPPORT 8
-#define OSC_SB_APEI_SUPPORT 16
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+#define OSC_QUERY_DWORD 0 /* DWORD 1 */
+#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */
+#define OSC_CONTROL_DWORD 2 /* DWORD 3 */
+
+/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
+#define OSC_QUERY_ENABLE 0x00000001 /* input */
+#define OSC_REQUEST_ERROR 0x00000002 /* return */
+#define OSC_INVALID_UUID_ERROR 0x00000004 /* return */
+#define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */
+#define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */
+
+/* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_SB_PAD_SUPPORT 0x00000001
+#define OSC_SB_PPC_OST_SUPPORT 0x00000002
+#define OSC_SB_PR3_SUPPORT 0x00000004
+#define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008
+#define OSC_SB_APEI_SUPPORT 0x00000010
+#define OSC_SB_CPC_SUPPORT 0x00000020
extern bool osc_sb_apei_support_acked;
-/* PCI defined _OSC bits */
-/* _OSC DW1 Definition (OS Support Fields) */
-#define OSC_EXT_PCI_CONFIG_SUPPORT 1
-#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
-#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
-#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
-#define OSC_MSI_SUPPORT 16
-#define OSC_PCI_SUPPORT_MASKS 0x1f
-
-/* _OSC DW1 Definition (OS Control Fields) */
-#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
-#define OSC_SHPC_NATIVE_HP_CONTROL 2
-#define OSC_PCI_EXPRESS_PME_CONTROL 4
-#define OSC_PCI_EXPRESS_AER_CONTROL 8
-#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
-
-#define OSC_PCI_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
- OSC_SHPC_NATIVE_HP_CONTROL | \
- OSC_PCI_EXPRESS_PME_CONTROL | \
- OSC_PCI_EXPRESS_AER_CONTROL | \
- OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
-
-#define OSC_PCI_NATIVE_HOTPLUG (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
- OSC_SHPC_NATIVE_HP_CONTROL)
+/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001
+#define OSC_PCI_ASPM_SUPPORT 0x00000002
+#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004
+#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008
+#define OSC_PCI_MSI_SUPPORT 0x00000010
+#define OSC_PCI_SUPPORT_MASKS 0x0000001f
+
+/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
+#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
+#define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002
+#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004
+#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
+#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
+#define OSC_PCI_CONTROL_MASKS 0x0000001f
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
u32 *mask, u32 req);
@@ -407,6 +415,15 @@ static inline bool acpi_driver_match_device(struct device *dev,
#define acpi_disabled 1
+#define ACPI_COMPANION(dev) (NULL)
+#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
+#define ACPI_HANDLE(dev) (NULL)
+
+static inline const char *acpi_dev_name(struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline void acpi_early_init(void) { }
static inline int early_acpi_boot_init(void)
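
The ACPI_COMPANION()/ACPI_HANDLE() helpers added above give drivers a uniform way to reach a device's ACPI companion object, with NULL-returning stubs when ACPI is disabled. A minimal usage sketch, not part of this patch; the driver and probe function names are invented for illustration:

/* Hypothetical platform driver probe using the new companion accessors. */
static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct acpi_device *adev = ACPI_COMPANION(dev);	/* NULL if the device has no companion */

	if (!adev)
		return -ENODEV;

	dev_info(dev, "ACPI companion: %s\n", acpi_dev_name(adev));

	/* ACPI_HANDLE(dev) yields adev->handle for acpi_evaluate_object() etc. */
	return 0;
}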
diff --git a/include/linux/acpi_gpio.h b/include/linux/acpi_gpio.h
index 4c120a1e0ca3..d875bc3dba3c 100644
--- a/include/linux/acpi_gpio.h
+++ b/include/linux/acpi_gpio.h
@@ -2,36 +2,35 @@
#define _LINUX_ACPI_GPIO_H_
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
/**
* struct acpi_gpio_info - ACPI GPIO specific information
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+ * @active_low: in case of @gpioint, the pin is active low
*/
struct acpi_gpio_info {
bool gpioint;
+ bool active_low;
};
#ifdef CONFIG_GPIO_ACPI
-int acpi_get_gpio(char *path, int pin);
-int acpi_get_gpio_by_index(struct device *dev, int index,
- struct acpi_gpio_info *info);
+struct gpio_desc *acpi_get_gpiod_by_index(struct device *dev, int index,
+ struct acpi_gpio_info *info);
void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
#else /* CONFIG_GPIO_ACPI */
-static inline int acpi_get_gpio(char *path, int pin)
+static inline struct gpio_desc *
+acpi_get_gpiod_by_index(struct device *dev, int index,
+ struct acpi_gpio_info *info)
{
- return -ENODEV;
-}
-
-static inline int acpi_get_gpio_by_index(struct device *dev, int index,
- struct acpi_gpio_info *info)
-{
- return -ENODEV;
+ return ERR_PTR(-ENOSYS);
}
static inline void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
@@ -39,4 +38,14 @@ static inline void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
#endif /* CONFIG_GPIO_ACPI */
+static inline int acpi_get_gpio_by_index(struct device *dev, int index,
+ struct acpi_gpio_info *info)
+{
+ struct gpio_desc *desc = acpi_get_gpiod_by_index(dev, index, info);
+
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+ return desc_to_gpio(desc);
+}
+
#endif /* _LINUX_ACPI_GPIO_H_ */
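
GPIO lookup through ACPI now hands back a gpio_desc (or an ERR_PTR) and fills the extended acpi_gpio_info, while the static inline wrapper above keeps the old integer-returning interface alive on top of it. A short consumer sketch; the function name and index are placeholders, not from this patch:

static int example_get_acpi_gpio(struct device *dev)
{
	struct acpi_gpio_info info;
	struct gpio_desc *desc;

	desc = acpi_get_gpiod_by_index(dev, 0, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);	/* -ENOSYS when CONFIG_GPIO_ACPI is off */

	/* info.gpioint distinguishes GpioInt from GpioIo; info.active_low is new */
	return desc_to_gpio(desc);
}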
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 1bdf965339f9..d9c92daa3944 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -27,15 +27,13 @@ struct kiocb;
*/
#define KIOCB_CANCELLED ((void *) (~0ULL))
-typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);
+typedef int (kiocb_cancel_fn)(struct kiocb *);
struct kiocb {
- atomic_t ki_users;
-
struct file *ki_filp;
struct kioctx *ki_ctx; /* NULL for sync ops */
kiocb_cancel_fn *ki_cancel;
- void (*ki_dtor)(struct kiocb *);
+ void *private;
union {
void __user *user;
@@ -44,17 +42,7 @@ struct kiocb {
__u64 ki_user_data; /* user's data for completion */
loff_t ki_pos;
-
- void *private;
- /* State that we remember to be able to restart/retry */
- unsigned short ki_opcode;
- size_t ki_nbytes; /* copy of iocb->aio_nbytes */
- char __user *ki_buf; /* remaining iocb->aio_buf */
- size_t ki_left; /* remaining bytes */
- struct iovec ki_inline_vec; /* inline vector */
- struct iovec *ki_iovec;
- unsigned long ki_nr_segs;
- unsigned long ki_cur_seg;
+ size_t ki_nbytes; /* copy of iocb->aio_nbytes */
struct list_head ki_list; /* the aio core uses this
* for cancellation */
@@ -74,7 +62,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
- .ki_users = ATOMIC_INIT(1),
.ki_ctx = NULL,
.ki_filp = filp,
.ki_obj.tsk = current,
@@ -84,7 +71,6 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
/* prototypes */
#ifdef CONFIG_AIO
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
-extern void aio_put_req(struct kiocb *iocb);
extern void aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
@@ -93,7 +79,6 @@ extern long do_io_submit(aio_context_t ctx_id, long nr,
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
-static inline void aio_put_req(struct kiocb *iocb) { }
static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 43ec7e247a80..63b5eff0a80f 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -21,7 +21,7 @@
#include <linux/resource.h>
#include <linux/regulator/consumer.h>
-#define AMBA_NR_IRQS 2
+#define AMBA_NR_IRQS 9
#define AMBA_CID 0xb105f00d
struct clk;
@@ -30,7 +30,6 @@ struct amba_device {
struct device dev;
struct resource res;
struct clk *pclk;
- u64 dma_mask;
unsigned int periphid;
unsigned int irq[AMBA_NR_IRQS];
};
@@ -131,7 +130,6 @@ struct amba_device name##_device = { \
struct amba_device name##_device = { \
.dev = __AMBA_DEV(busid, data, ~0ULL), \
.res = DEFINE_RES_MEM(base, SZ_4K), \
- .dma_mask = ~0ULL, \
.irq = irqs, \
.periphid = id, \
}
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
index 3e7b62fbefbd..91b84a7f0539 100644
--- a/include/linux/amba/pl080.h
+++ b/include/linux/amba/pl080.h
@@ -87,6 +87,7 @@
#define PL080_CONTROL_SB_SIZE_MASK (0x7 << 12)
#define PL080_CONTROL_SB_SIZE_SHIFT (12)
#define PL080_CONTROL_TRANSFER_SIZE_MASK (0xfff << 0)
+#define PL080S_CONTROL_TRANSFER_SIZE_MASK (0x1ffffff << 0)
#define PL080_CONTROL_TRANSFER_SIZE_SHIFT (0)
#define PL080_BSIZE_1 (0x0)
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h
new file mode 100644
index 000000000000..a89df3be1686
--- /dev/null
+++ b/include/linux/assoc_array.h
@@ -0,0 +1,92 @@
+/* Generic associative array implementation.
+ *
+ * See Documentation/assoc_array.txt for information.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASSOC_ARRAY_H
+#define _LINUX_ASSOC_ARRAY_H
+
+#ifdef CONFIG_ASSOCIATIVE_ARRAY
+
+#include <linux/types.h>
+
+#define ASSOC_ARRAY_KEY_CHUNK_SIZE BITS_PER_LONG /* Key data retrieved in chunks of this size */
+
+/*
+ * Generic associative array.
+ */
+struct assoc_array {
+ struct assoc_array_ptr *root; /* The node at the root of the tree */
+ unsigned long nr_leaves_on_tree;
+};
+
+/*
+ * Operations on objects and index keys for use by array manipulation routines.
+ */
+struct assoc_array_ops {
+ /* Method to get a chunk of an index key from caller-supplied data */
+ unsigned long (*get_key_chunk)(const void *index_key, int level);
+
+ /* Method to get a piece of an object's index key */
+ unsigned long (*get_object_key_chunk)(const void *object, int level);
+
+ /* Is this the object we're looking for? */
+ bool (*compare_object)(const void *object, const void *index_key);
+
+ /* How different is an object from an index key, to a bit position in
+ * their keys? (or -1 if they're the same)
+ */
+ int (*diff_objects)(const void *object, const void *index_key);
+
+ /* Method to free an object. */
+ void (*free_object)(void *object);
+};
+
+/*
+ * Access and manipulation functions.
+ */
+struct assoc_array_edit;
+
+static inline void assoc_array_init(struct assoc_array *array)
+{
+ array->root = NULL;
+ array->nr_leaves_on_tree = 0;
+}
+
+extern int assoc_array_iterate(const struct assoc_array *array,
+ int (*iterator)(const void *object,
+ void *iterator_data),
+ void *iterator_data);
+extern void *assoc_array_find(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+extern void assoc_array_destroy(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ void *object);
+extern void assoc_array_insert_set_object(struct assoc_array_edit *edit,
+ void *object);
+extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+extern void assoc_array_apply_edit(struct assoc_array_edit *edit);
+extern void assoc_array_cancel_edit(struct assoc_array_edit *edit);
+extern int assoc_array_gc(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ bool (*iterator)(void *object, void *iterator_data),
+ void *iterator_data);
+
+#endif /* CONFIG_ASSOCIATIVE_ARRAY */
+#endif /* _LINUX_ASSOC_ARRAY_H */
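
The new assoc_array API separates lookups from modifications: an insert or delete first builds an assoc_array_edit (an ERR_PTR on allocation failure), and the edit is then applied so that concurrent readers see either the old or the new tree. A rough usage sketch for objects keyed by a single unsigned long; the object type, ops and helper below are assumptions for illustration, not code from this tree:

/* Needs <linux/assoc_array.h>, <linux/bitops.h>, <linux/err.h>, <linux/slab.h>. */
struct example_obj {
	unsigned long	id;		/* the whole index key fits in one chunk */
};

static unsigned long example_get_key_chunk(const void *index_key, int level)
{
	return *(const unsigned long *)index_key;
}

static unsigned long example_get_object_key_chunk(const void *object, int level)
{
	return ((const struct example_obj *)object)->id;
}

static bool example_compare_object(const void *object, const void *index_key)
{
	return ((const struct example_obj *)object)->id ==
		*(const unsigned long *)index_key;
}

static int example_diff_objects(const void *object, const void *index_key)
{
	unsigned long diff = ((const struct example_obj *)object)->id ^
			     *(const unsigned long *)index_key;

	return diff ? __ffs(diff) : -1;	/* first differing bit, or -1 if identical */
}

static void example_free_object(void *object)
{
	kfree(object);
}

static const struct assoc_array_ops example_ops = {
	.get_key_chunk		= example_get_key_chunk,
	.get_object_key_chunk	= example_get_object_key_chunk,
	.compare_object		= example_compare_object,
	.diff_objects		= example_diff_objects,
	.free_object		= example_free_object,
};

/* Two-phase insert: build the edit, then commit it. */
static int example_insert(struct assoc_array *array, struct example_obj *obj)
{
	struct assoc_array_edit *edit;

	edit = assoc_array_insert(array, &example_ops, &obj->id, obj);
	if (IS_ERR(edit))
		return PTR_ERR(edit);
	assoc_array_apply_edit(edit);
	return 0;
}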
diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h
new file mode 100644
index 000000000000..711275e6681c
--- /dev/null
+++ b/include/linux/assoc_array_priv.h
@@ -0,0 +1,182 @@
+/* Private definitions for the generic associative array implementation.
+ *
+ * See Documentation/assoc_array.txt for information.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASSOC_ARRAY_PRIV_H
+#define _LINUX_ASSOC_ARRAY_PRIV_H
+
+#ifdef CONFIG_ASSOCIATIVE_ARRAY
+
+#include <linux/assoc_array.h>
+
+#define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */
+#define ASSOC_ARRAY_FAN_MASK (ASSOC_ARRAY_FAN_OUT - 1)
+#define ASSOC_ARRAY_LEVEL_STEP (ilog2(ASSOC_ARRAY_FAN_OUT))
+#define ASSOC_ARRAY_LEVEL_STEP_MASK (ASSOC_ARRAY_LEVEL_STEP - 1)
+#define ASSOC_ARRAY_KEY_CHUNK_MASK (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1)
+#define ASSOC_ARRAY_KEY_CHUNK_SHIFT (ilog2(BITS_PER_LONG))
+
+/*
+ * Undefined type representing a pointer with type information in the bottom
+ * two bits.
+ */
+struct assoc_array_ptr;
+
+/*
+ * An N-way node in the tree.
+ *
+ * Each slot contains one of four things:
+ *
+ * (1) Nothing (NULL).
+ *
+ * (2) A leaf object (pointer types 0).
+ *
+ * (3) A next-level node (pointer type 1, subtype 0).
+ *
+ * (4) A shortcut (pointer type 1, subtype 1).
+ *
+ * The tree is optimised for search-by-ID, but permits reasonable iteration
+ * also.
+ *
+ * The tree is navigated by constructing an index key consisting of an array of
+ * segments, where each segment is ilog2(ASSOC_ARRAY_FAN_OUT) bits in size.
+ *
+ * The segments correspond to levels of the tree (the first segment is used at
+ * level 0, the second at level 1, etc.).
+ */
+struct assoc_array_node {
+ struct assoc_array_ptr *back_pointer;
+ u8 parent_slot;
+ struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT];
+ unsigned long nr_leaves_on_branch;
+};
+
+/*
+ * A shortcut through the index space out to where a collection of nodes/leaves
+ * with the same IDs live.
+ */
+struct assoc_array_shortcut {
+ struct assoc_array_ptr *back_pointer;
+ int parent_slot;
+ int skip_to_level;
+ struct assoc_array_ptr *next_node;
+ unsigned long index_key[];
+};
+
+/*
+ * Preallocation cache.
+ */
+struct assoc_array_edit {
+ struct rcu_head rcu;
+ struct assoc_array *array;
+ const struct assoc_array_ops *ops;
+ const struct assoc_array_ops *ops_for_excised_subtree;
+ struct assoc_array_ptr *leaf;
+ struct assoc_array_ptr **leaf_p;
+ struct assoc_array_ptr *dead_leaf;
+ struct assoc_array_ptr *new_meta[3];
+ struct assoc_array_ptr *excised_meta[1];
+ struct assoc_array_ptr *excised_subtree;
+ struct assoc_array_ptr **set_backpointers[ASSOC_ARRAY_FAN_OUT];
+ struct assoc_array_ptr *set_backpointers_to;
+ struct assoc_array_node *adjust_count_on;
+ long adjust_count_by;
+ struct {
+ struct assoc_array_ptr **ptr;
+ struct assoc_array_ptr *to;
+ } set[2];
+ struct {
+ u8 *p;
+ u8 to;
+ } set_parent_slot[1];
+ u8 segment_cache[ASSOC_ARRAY_FAN_OUT + 1];
+};
+
+/*
+ * Internal tree member pointers are marked in the bottom one or two bits to
+ * indicate what type they are so that we don't have to look behind every
+ * pointer to see what it points to.
+ *
+ * We provide functions to test type annotations and to create and translate
+ * the annotated pointers.
+ */
+#define ASSOC_ARRAY_PTR_TYPE_MASK 0x1UL
+#define ASSOC_ARRAY_PTR_LEAF_TYPE 0x0UL /* Points to leaf (or nowhere) */
+#define ASSOC_ARRAY_PTR_META_TYPE 0x1UL /* Points to node or shortcut */
+#define ASSOC_ARRAY_PTR_SUBTYPE_MASK 0x2UL
+#define ASSOC_ARRAY_PTR_NODE_SUBTYPE 0x0UL
+#define ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE 0x2UL
+
+static inline bool assoc_array_ptr_is_meta(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & ASSOC_ARRAY_PTR_TYPE_MASK;
+}
+static inline bool assoc_array_ptr_is_leaf(const struct assoc_array_ptr *x)
+{
+ return !assoc_array_ptr_is_meta(x);
+}
+static inline bool assoc_array_ptr_is_shortcut(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & ASSOC_ARRAY_PTR_SUBTYPE_MASK;
+}
+static inline bool assoc_array_ptr_is_node(const struct assoc_array_ptr *x)
+{
+ return !assoc_array_ptr_is_shortcut(x);
+}
+
+static inline void *assoc_array_ptr_to_leaf(const struct assoc_array_ptr *x)
+{
+ return (void *)((unsigned long)x & ~ASSOC_ARRAY_PTR_TYPE_MASK);
+}
+
+static inline
+unsigned long __assoc_array_ptr_to_meta(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x &
+ ~(ASSOC_ARRAY_PTR_SUBTYPE_MASK | ASSOC_ARRAY_PTR_TYPE_MASK);
+}
+static inline
+struct assoc_array_node *assoc_array_ptr_to_node(const struct assoc_array_ptr *x)
+{
+ return (struct assoc_array_node *)__assoc_array_ptr_to_meta(x);
+}
+static inline
+struct assoc_array_shortcut *assoc_array_ptr_to_shortcut(const struct assoc_array_ptr *x)
+{
+ return (struct assoc_array_shortcut *)__assoc_array_ptr_to_meta(x);
+}
+
+static inline
+struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t)
+{
+ return (struct assoc_array_ptr *)((unsigned long)p | t);
+}
+static inline
+struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p)
+{
+ return __assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE);
+}
+static inline
+struct assoc_array_ptr *assoc_array_node_to_ptr(const struct assoc_array_node *p)
+{
+ return __assoc_array_x_to_ptr(
+ p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE);
+}
+static inline
+struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p)
+{
+ return __assoc_array_x_to_ptr(
+ p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE);
+}
+
+#endif /* CONFIG_ASSOCIATIVE_ARRAY */
+#endif /* _LINUX_ASSOC_ARRAY_PRIV_H */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index bf4c69ca76df..f2f4d8da97c0 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -219,6 +219,7 @@ enum {
ATA_CMD_IDLE = 0xE3, /* place in idle power mode */
ATA_CMD_EDD = 0x90, /* execute device diagnostic */
ATA_CMD_DOWNLOAD_MICRO = 0x92,
+ ATA_CMD_DOWNLOAD_MICRO_DMA = 0x93,
ATA_CMD_NOP = 0x00,
ATA_CMD_FLUSH = 0xE7,
ATA_CMD_FLUSH_EXT = 0xEA,
@@ -268,12 +269,15 @@ enum {
ATA_CMD_WRITE_LOG_EXT = 0x3F,
ATA_CMD_READ_LOG_DMA_EXT = 0x47,
ATA_CMD_WRITE_LOG_DMA_EXT = 0x57,
+ ATA_CMD_TRUSTED_NONDATA = 0x5B,
ATA_CMD_TRUSTED_RCV = 0x5C,
ATA_CMD_TRUSTED_RCV_DMA = 0x5D,
ATA_CMD_TRUSTED_SND = 0x5E,
ATA_CMD_TRUSTED_SND_DMA = 0x5F,
ATA_CMD_PMP_READ = 0xE4,
+ ATA_CMD_PMP_READ_DMA = 0xE9,
ATA_CMD_PMP_WRITE = 0xE8,
+ ATA_CMD_PMP_WRITE_DMA = 0xEB,
ATA_CMD_CONF_OVERLAY = 0xB1,
ATA_CMD_SEC_SET_PASS = 0xF1,
ATA_CMD_SEC_UNLOCK = 0xF2,
@@ -292,6 +296,9 @@ enum {
ATA_CMD_CFA_TRANS_SECT = 0x87,
ATA_CMD_CFA_ERASE = 0xC0,
ATA_CMD_CFA_WRITE_MULT_NE = 0xCD,
+ ATA_CMD_REQ_SENSE_DATA = 0x0B,
+ ATA_CMD_SANITIZE_DEVICE = 0xB4,
+
/* marked obsolete in the ATA/ATAPI-7 spec */
ATA_CMD_RESTORE = 0x10,
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index be201ca2990c..00beddf6be20 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -125,5 +125,6 @@
#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
#define ATMEL_US_NAME 0xf0 /* Ip Name */
+#define ATMEL_US_VERSION 0xfc /* Ip Version */
#endif
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 729a4d165bcc..a40641954c29 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -73,6 +73,8 @@ struct audit_field {
void *lsm_rule;
};
+extern int is_audit_feature_set(int which);
+
extern int __init audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
extern int audit_classify_arch(int arch);
@@ -207,7 +209,7 @@ static inline int audit_get_sessionid(struct task_struct *tsk)
extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
-extern int __audit_bprm(struct linux_binprm *bprm);
+extern void __audit_bprm(struct linux_binprm *bprm);
extern int __audit_socketcall(int nargs, unsigned long *args);
extern int __audit_sockaddr(int len, void *addr);
extern void __audit_fd_pair(int fd1, int fd2);
@@ -236,11 +238,10 @@ static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid
if (unlikely(!audit_dummy_context()))
__audit_ipc_set_perm(qbytes, uid, gid, mode);
}
-static inline int audit_bprm(struct linux_binprm *bprm)
+static inline void audit_bprm(struct linux_binprm *bprm)
{
if (unlikely(!audit_dummy_context()))
- return __audit_bprm(bprm);
- return 0;
+ __audit_bprm(bprm);
}
static inline int audit_socketcall(int nargs, unsigned long *args)
{
@@ -367,10 +368,8 @@ static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
gid_t gid, umode_t mode)
{ }
-static inline int audit_bprm(struct linux_binprm *bprm)
-{
- return 0;
-}
+static inline void audit_bprm(struct linux_binprm *bprm)
+{ }
static inline int audit_socketcall(int nargs, unsigned long *args)
{
return 0;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c3881553f7d1..24819001f5c8 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -109,7 +109,7 @@ struct backing_dev_info {
#endif
};
-int bdi_init(struct backing_dev_info *bdi);
+int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);
__printf(3, 4)
@@ -117,7 +117,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
-int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
+int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
@@ -243,6 +243,8 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
* BDI_CAP_EXEC_MAP: Can be mapped for execution
*
* BDI_CAP_SWAP_BACKED: Count shmem/tmpfs objects as swap-backed.
+ *
+ * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
*/
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
@@ -254,6 +256,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
#define BDI_CAP_NO_ACCT_WB 0x00000080
#define BDI_CAP_SWAP_BACKED 0x00000100
#define BDI_CAP_STABLE_WRITES 0x00000200
+#define BDI_CAP_STRICTLIMIT 0x00000400
#define BDI_CAP_VMFLAGS \
(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 53b77949c79d..5f9cd963213d 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -100,6 +100,9 @@ struct backlight_device {
/* The framebuffer notifier block */
struct notifier_block fb_notif;
+ /* list entry of all registered backlight devices */
+ struct list_head entry;
+
struct device dev;
};
@@ -123,6 +126,7 @@ extern void devm_backlight_device_unregister(struct device *dev,
struct backlight_device *bd);
extern void backlight_force_update(struct backlight_device *bd,
enum backlight_update_reason reason);
+extern bool backlight_device_registered(enum backlight_type type);
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index f7f1d7169b11..089743ade734 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -159,6 +159,26 @@ static inline bool balloon_page_movable(struct page *page)
}
/*
+ * isolated_balloon_page - identify an isolated balloon page on private
+ * compaction/migration page lists.
+ *
+ * After a compaction thread isolates a balloon page for migration, it raises
+ * the page refcount to prevent concurrent compaction threads from re-isolating
+ * the same page. For that reason putback_movable_pages(), or other routines
+ * that need to identify isolated balloon pages on private pagelists, cannot
+ * rely on balloon_page_movable() to accomplish the task.
+ */
+static inline bool isolated_balloon_page(struct page *page)
+{
+ /* Already isolated balloon pages, by default, have a raised refcount */
+ if (page_flags_cleared(page) && !page_mapped(page) &&
+ page_count(page) >= 2)
+ return __is_movable_balloon_page(page);
+
+ return false;
+}
+
+/*
* balloon_page_insert - insert a page into the balloon's page list and make
* the page->mapping assignment accordingly.
* @page : page to be assigned as a 'balloon page'
@@ -243,6 +263,11 @@ static inline bool balloon_page_movable(struct page *page)
return false;
}
+static inline bool isolated_balloon_page(struct page *page)
+{
+ return false;
+}
+
static inline bool balloon_page_isolate(struct page *page)
{
return false;
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index d66033f418c9..0333e605ea0d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -242,6 +242,7 @@ extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
struct bcma_device *core, bool enable);
extern void bcma_core_pci_up(struct bcma_bus *bus);
extern void bcma_core_pci_down(struct bcma_bus *bus);
+extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 70cf138690e9..fd8bf3219ef7 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -31,7 +31,7 @@ struct linux_binprm {
#ifdef __alpha__
unsigned int taso:1;
#endif
- unsigned int recursion_depth;
+ unsigned int recursion_depth; /* only for search_binary_handler() */
struct file * file;
struct cred *cred; /* new credentials */
int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
@@ -56,11 +56,12 @@ struct linux_binprm {
/* Function parameter for binfmt->coredump */
struct coredump_params {
- siginfo_t *siginfo;
+ const siginfo_t *siginfo;
struct pt_regs *regs;
struct file *file;
unsigned long limit;
unsigned long mm_flags;
+ loff_t written;
};
/*
@@ -99,9 +100,6 @@ extern void setup_new_exec(struct linux_binprm * bprm);
extern void would_dump(struct linux_binprm *, struct file *);
extern int suid_dumpable;
-#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
-#define SUID_DUMP_USER 1 /* Dump as user of process */
-#define SUID_DUMP_ROOT 2 /* Dump as root */
/* Stack area protections */
#define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ec48bac5b039..060ff695085c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,6 +218,7 @@ struct bio_pair {
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
+extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
@@ -419,6 +420,8 @@ static inline void bio_list_init(struct bio_list *bl)
bl->head = bl->tail = NULL;
}
+#define BIO_EMPTY_LIST { NULL, NULL }
+
#define bio_list_for_each(bio, bl) \
for (bio = (bl)->head; bio; bio = bio->bi_next)
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3b6b82108b9..abc9ca778456 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -4,12 +4,23 @@
#ifdef __KERNEL__
#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
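
GENMASK()/GENMASK_ULL() build a contiguous mask from a high and a low bit position, which pairs naturally with a shift for register field extraction. A small illustrative snippet; the register layout is made up:

/* Hypothetical 32-bit status register with a speed field in bits 14..8. */
#define EXAMPLE_SPEED_MASK	GENMASK(14, 8)		/* 0x00007f00 */
#define EXAMPLE_SPEED_SHIFT	8

static inline unsigned int example_speed(u32 reg)
{
	return (reg & EXAMPLE_SPEED_MASK) >> EXAMPLE_SPEED_SHIFT;
}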
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
new file mode 100644
index 000000000000..ab0e9b2025b3
--- /dev/null
+++ b/include/linux/blk-mq.h
@@ -0,0 +1,183 @@
+#ifndef BLK_MQ_H
+#define BLK_MQ_H
+
+#include <linux/blkdev.h>
+
+struct blk_mq_tags;
+
+struct blk_mq_cpu_notifier {
+ struct list_head list;
+ void *data;
+ void (*notify)(void *data, unsigned long action, unsigned int cpu);
+};
+
+struct blk_mq_hw_ctx {
+ struct {
+ spinlock_t lock;
+ struct list_head dispatch;
+ } ____cacheline_aligned_in_smp;
+
+ unsigned long state; /* BLK_MQ_S_* flags */
+ struct delayed_work delayed_work;
+
+ unsigned long flags; /* BLK_MQ_F_* flags */
+
+ struct request_queue *queue;
+ unsigned int queue_num;
+
+ void *driver_data;
+
+ unsigned int nr_ctx;
+ struct blk_mq_ctx **ctxs;
+ unsigned int nr_ctx_map;
+ unsigned long *ctx_map;
+
+ struct request **rqs;
+ struct list_head page_list;
+ struct blk_mq_tags *tags;
+
+ unsigned long queued;
+ unsigned long run;
+#define BLK_MQ_MAX_DISPATCH_ORDER 10
+ unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
+
+ unsigned int queue_depth;
+ unsigned int numa_node;
+ unsigned int cmd_size; /* per-request extra data */
+
+ struct blk_mq_cpu_notifier cpu_notifier;
+ struct kobject kobj;
+};
+
+struct blk_mq_reg {
+ struct blk_mq_ops *ops;
+ unsigned int nr_hw_queues;
+ unsigned int queue_depth;
+ unsigned int reserved_tags;
+ unsigned int cmd_size; /* per-request extra data */
+ int numa_node;
+ unsigned int timeout;
+ unsigned int flags; /* BLK_MQ_F_* */
+};
+
+typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
+typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
+typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int);
+typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
+typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
+
+struct blk_mq_ops {
+ /*
+ * Queue request
+ */
+ queue_rq_fn *queue_rq;
+
+ /*
+ * Map to specific hardware queue
+ */
+ map_queue_fn *map_queue;
+
+ /*
+ * Called on request timeout
+ */
+ rq_timed_out_fn *timeout;
+
+ /*
+ * Override for hctx allocations (should probably go)
+ */
+ alloc_hctx_fn *alloc_hctx;
+ free_hctx_fn *free_hctx;
+
+ /*
+ * Called when the block layer side of a hardware queue has been
+ * set up, allowing the driver to allocate/init matching structures.
+ * Ditto for exit/teardown.
+ */
+ init_hctx_fn *init_hctx;
+ exit_hctx_fn *exit_hctx;
+};
+
+enum {
+ BLK_MQ_RQ_QUEUE_OK = 0, /* queued fine */
+ BLK_MQ_RQ_QUEUE_BUSY = 1, /* requeue IO for later */
+ BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */
+
+ BLK_MQ_F_SHOULD_MERGE = 1 << 0,
+ BLK_MQ_F_SHOULD_SORT = 1 << 1,
+ BLK_MQ_F_SHOULD_IPI = 1 << 2,
+
+ BLK_MQ_S_STOPPED = 1 << 0,
+
+ BLK_MQ_MAX_DEPTH = 2048,
+};
+
+struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
+void blk_mq_free_queue(struct request_queue *);
+int blk_mq_register_disk(struct gendisk *);
+void blk_mq_unregister_disk(struct gendisk *);
+void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
+
+void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
+
+void blk_mq_insert_request(struct request_queue *, struct request *, bool);
+void blk_mq_run_queues(struct request_queue *q, bool async);
+void blk_mq_free_request(struct request *rq);
+bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
+struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
+
+struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
+struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
+void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
+
+void blk_mq_end_io(struct request *rq, int error);
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
+void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
+void blk_mq_stop_hw_queues(struct request_queue *q);
+void blk_mq_start_stopped_hw_queues(struct request_queue *q);
+
+/*
+ * Driver command data is immediately after the request. So subtract request
+ * size to get back to the original request.
+ */
+static inline struct request *blk_mq_rq_from_pdu(void *pdu)
+{
+ return pdu - sizeof(struct request);
+}
+static inline void *blk_mq_rq_to_pdu(struct request *rq)
+{
+ return (void *) rq + sizeof(*rq);
+}
+
+static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
+ unsigned int tag)
+{
+ return hctx->rqs[tag];
+}
+
+#define queue_for_each_hw_ctx(q, hctx, i) \
+ for ((i) = 0, hctx = (q)->queue_hw_ctx[0]; \
+ (i) < (q)->nr_hw_queues; (i)++, hctx = (q)->queue_hw_ctx[i])
+
+#define queue_for_each_ctx(q, ctx, i) \
+ for ((i) = 0, ctx = per_cpu_ptr((q)->queue_ctx, 0); \
+ (i) < (q)->nr_queues; (i)++, ctx = per_cpu_ptr(q->queue_ctx, (i)))
+
+#define hctx_for_each_ctx(hctx, ctx, i) \
+ for ((i) = 0, ctx = (hctx)->ctxs[0]; \
+ (i) < (hctx)->nr_ctx; (i)++, ctx = (hctx)->ctxs[(i)])
+
+#define blk_ctx_sum(q, sum) \
+({ \
+ struct blk_mq_ctx *__x; \
+ unsigned int __ret = 0, __i; \
+ \
+ queue_for_each_ctx((q), __x, __i) \
+ __ret += sum; \
+ __ret; \
+})
+
+#endif
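
blk-mq drivers describe their queues through blk_mq_reg/blk_mq_ops and keep per-command state in the cmd_size area behind each request (see blk_mq_rq_to_pdu() above). A heavily abridged registration sketch; the driver names, queue depth and command layout are invented, and error handling is elided:

struct example_cmd {
	u32 hw_tag;			/* stored in the per-request cmd_size area */
};

static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->hw_tag = rq->tag;		/* blk-mq assigns the request tag */
	/* ... submit to hardware; call blk_mq_end_io(rq, 0) on completion ... */
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* stock ctx-to-hctx mapping */
};

static struct blk_mq_reg example_mq_reg = {
	.ops		= &example_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.cmd_size	= sizeof(struct example_cmd),
	.numa_node	= NUMA_NO_NODE,
	.flags		= BLK_MQ_F_SHOULD_MERGE,
};

/* In probe: q = blk_mq_init_queue(&example_mq_reg, driver_data); check IS_ERR(q). */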
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index fa1abeb45b76..238ef0ed62f8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -178,19 +178,20 @@ enum rq_flag_bits {
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_KERNEL, /* direct IO to kernel pages */
__REQ_PM, /* runtime pm request */
+ __REQ_END, /* last of chain of requests */
__REQ_NR_BITS, /* stops here */
};
-#define REQ_WRITE (1 << __REQ_WRITE)
-#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
-#define REQ_SYNC (1 << __REQ_SYNC)
-#define REQ_META (1 << __REQ_META)
-#define REQ_PRIO (1 << __REQ_PRIO)
-#define REQ_DISCARD (1 << __REQ_DISCARD)
-#define REQ_WRITE_SAME (1 << __REQ_WRITE_SAME)
-#define REQ_NOIDLE (1 << __REQ_NOIDLE)
+#define REQ_WRITE (1ULL << __REQ_WRITE)
+#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
+#define REQ_SYNC (1ULL << __REQ_SYNC)
+#define REQ_META (1ULL << __REQ_META)
+#define REQ_PRIO (1ULL << __REQ_PRIO)
+#define REQ_DISCARD (1ULL << __REQ_DISCARD)
+#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME)
+#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -206,28 +207,29 @@ enum rq_flag_bits {
#define REQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
-#define REQ_RAHEAD (1 << __REQ_RAHEAD)
-#define REQ_THROTTLED (1 << __REQ_THROTTLED)
-
-#define REQ_SORTED (1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
-#define REQ_FUA (1 << __REQ_FUA)
-#define REQ_NOMERGE (1 << __REQ_NOMERGE)
-#define REQ_STARTED (1 << __REQ_STARTED)
-#define REQ_DONTPREP (1 << __REQ_DONTPREP)
-#define REQ_QUEUED (1 << __REQ_QUEUED)
-#define REQ_ELVPRIV (1 << __REQ_ELVPRIV)
-#define REQ_FAILED (1 << __REQ_FAILED)
-#define REQ_QUIET (1 << __REQ_QUIET)
-#define REQ_PREEMPT (1 << __REQ_PREEMPT)
-#define REQ_ALLOCED (1 << __REQ_ALLOCED)
-#define REQ_COPY_USER (1 << __REQ_COPY_USER)
-#define REQ_FLUSH (1 << __REQ_FLUSH)
-#define REQ_FLUSH_SEQ (1 << __REQ_FLUSH_SEQ)
-#define REQ_IO_STAT (1 << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
-#define REQ_SECURE (1 << __REQ_SECURE)
-#define REQ_KERNEL (1 << __REQ_KERNEL)
-#define REQ_PM (1 << __REQ_PM)
+#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
+#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
+
+#define REQ_SORTED (1ULL << __REQ_SORTED)
+#define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER)
+#define REQ_FUA (1ULL << __REQ_FUA)
+#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
+#define REQ_STARTED (1ULL << __REQ_STARTED)
+#define REQ_DONTPREP (1ULL << __REQ_DONTPREP)
+#define REQ_QUEUED (1ULL << __REQ_QUEUED)
+#define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV)
+#define REQ_FAILED (1ULL << __REQ_FAILED)
+#define REQ_QUIET (1ULL << __REQ_QUIET)
+#define REQ_PREEMPT (1ULL << __REQ_PREEMPT)
+#define REQ_ALLOCED (1ULL << __REQ_ALLOCED)
+#define REQ_COPY_USER (1ULL << __REQ_COPY_USER)
+#define REQ_FLUSH (1ULL << __REQ_FLUSH)
+#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ)
+#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
+#define REQ_SECURE (1ULL << __REQ_SECURE)
+#define REQ_KERNEL (1ULL << __REQ_KERNEL)
+#define REQ_PM (1ULL << __REQ_PM)
+#define REQ_END (1ULL << __REQ_END)
#endif /* __LINUX_BLK_TYPES_H */
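
The switch from (1 << bit) to (1ULL << bit) matches cmd_flags growing to u64 in blkdev.h below: once flag indices reach bit 31 and beyond, a plain int shift no longer yields the intended 64-bit value. A stand-alone illustration, not kernel code:

unsigned long long a = 1 << 31;		/* shifts a 32-bit signed int: undefined/sign-extended result */
unsigned long long b = 1ULL << 31;	/* 0x0000000080000000, safe up to bit 63 */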
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2fdb4a451b49..1b135d49b279 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -8,6 +8,7 @@
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
+#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
@@ -94,12 +95,19 @@ enum rq_cmd_type_bits {
* as well!
*/
struct request {
- struct list_head queuelist;
- struct call_single_data csd;
+ union {
+ struct list_head queuelist;
+ struct llist_node ll_list;
+ };
+ union {
+ struct call_single_data csd;
+ struct work_struct mq_flush_data;
+ };
struct request_queue *q;
+ struct blk_mq_ctx *mq_ctx;
- unsigned int cmd_flags;
+ u64 cmd_flags;
enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags;
@@ -160,8 +168,6 @@ struct request {
unsigned short ioprio;
- int ref_count;
-
void *special; /* opaque pointer available for LLD use */
char *buffer; /* kaddr of the current segment if available */
@@ -215,6 +221,8 @@ struct request_pm_state
#include <linux/elevator.h>
+struct blk_queue_ctx;
+
typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
@@ -313,6 +321,18 @@ struct request_queue {
dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn;
+ struct blk_mq_ops *mq_ops;
+
+ unsigned int *mq_map;
+
+ /* sw queues */
+ struct blk_mq_ctx *queue_ctx;
+ unsigned int nr_queues;
+
+ /* hw dispatch queues */
+ struct blk_mq_hw_ctx **queue_hw_ctx;
+ unsigned int nr_hw_queues;
+
/*
* Dispatch queue sorting
*/
@@ -361,6 +381,11 @@ struct request_queue {
*/
struct kobject kobj;
+ /*
+ * mq queue kobject
+ */
+ struct kobject mq_kobj;
+
#ifdef CONFIG_PM_RUNTIME
struct device *dev;
int rpm_status;
@@ -425,7 +450,13 @@ struct request_queue {
unsigned long flush_pending_since;
struct list_head flush_queue[2];
struct list_head flush_data_in_flight;
- struct request flush_rq;
+ union {
+ struct request flush_rq;
+ struct {
+ spinlock_t mq_flush_lock;
+ struct work_struct mq_flush_work;
+ };
+ };
struct mutex sysfs_lock;
@@ -437,14 +468,14 @@ struct request_queue {
struct bsg_class_device bsg_dev;
#endif
-#ifdef CONFIG_BLK_CGROUP
- struct list_head all_q_node;
-#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
+ wait_queue_head_t mq_freeze_wq;
+ struct percpu_counter mq_usage_counter;
+ struct list_head all_q_node;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
@@ -467,12 +498,16 @@ struct request_queue {
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
+#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))
+#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
+ (1 << QUEUE_FLAG_SAME_COMP))
+
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
if (q->queue_lock)
@@ -539,6 +574,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
+#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
@@ -570,7 +606,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
+#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
@@ -862,6 +898,17 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
return blk_queue_get_max_sectors(q, rq->cmd_flags);
}
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+ unsigned int nr_bios = 0;
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ nr_bios++;
+
+ return nr_bios;
+}
+
/*
* Request issue related functions.
*/
@@ -1002,6 +1049,7 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
struct blk_plug {
unsigned long magic; /* detect uninitialized use-cases */
struct list_head list; /* requests */
+ struct list_head mq_list; /* blk-mq requests */
struct list_head cb_list; /* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
@@ -1039,7 +1087,10 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
- return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
+ return plug &&
+ (!list_empty(&plug->list) ||
+ !list_empty(&plug->mq_list) ||
+ !list_empty(&plug->cb_list));
}
/*
@@ -1314,6 +1365,7 @@ static inline void put_dev_sector(Sector p)
struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
/*
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7c2e030e72f1..afc1343df3c7 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -5,6 +5,7 @@
#include <linux/relay.h>
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
+#include <linux/list.h>
#if defined(CONFIG_BLK_DEV_IO_TRACE)
@@ -23,6 +24,7 @@ struct blk_trace {
struct dentry *dir;
struct dentry *dropped_file;
struct dentry *msg_file;
+ struct list_head running_list;
atomic_t dropped;
};
@@ -87,7 +89,7 @@ static inline int blk_trace_init_sysfs(struct device *dev)
#ifdef CONFIG_COMPAT
struct compat_blk_user_trace_setup {
- char name[32];
+ char name[BLKTRACE_BDEV_SIZE];
u16 act_mask;
u32 buf_size;
u32 buf_nr;
diff --git a/include/linux/capability.h b/include/linux/capability.h
index d9a4f7f40f32..a6ee1f9a5018 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -210,7 +210,6 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
-extern bool nsown_capable(int cap);
extern bool inode_capable(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index ce6df39f60ff..8f47625a0661 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -335,6 +335,8 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
+extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
+
extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
struct ceph_vino vino,
struct ceph_file_layout *layout,
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3561d305b1e0..39c1d9469677 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -612,11 +612,6 @@ struct cgroup_subsys {
int subsys_id;
int disabled;
int early_init;
- /*
- * True if this subsys uses ID. ID is not available before cgroup_init()
- * (not available in early_init time.)
- */
- bool use_id;
/*
* If %false, this subsystem is properly hierarchical -
@@ -642,9 +637,6 @@ struct cgroup_subsys {
*/
struct cgroupfs_root *root;
struct list_head sibling;
- /* used when use_id == true */
- struct idr idr;
- spinlock_t id_lock;
/* list of cftype_sets */
struct list_head cftsets;
@@ -875,35 +867,6 @@ int css_scan_tasks(struct cgroup_subsys_state *css,
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
-/*
- * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
- * if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
- * CSS ID is assigned at cgroup allocation (create) automatically
- * and removed when subsys calls free_css_id() function. This is because
- * the lifetime of cgroup_subsys_state is subsys's matter.
- *
- * Looking up and scanning function should be called under rcu_read_lock().
- * Taking cgroup_mutex is not necessary for following calls.
- * But the css returned by this routine can be "not populated yet" or "being
- * destroyed". The caller should check css and cgroup's status.
- */
-
-/*
- * Typically Called at ->destroy(), or somewhere the subsys frees
- * cgroup_subsys_state.
- */
-void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
-
-/* Find a cgroup_subsys_state which has given ID */
-
-struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
-
-/* Returns true if root is ancestor of cg */
-bool css_is_ancestor(struct cgroup_subsys_state *cg,
- const struct cgroup_subsys_state *root);
-
-/* Get id and depth of css */
-unsigned short css_id(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss);
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
index dd7adff76e81..8138c94409f3 100644
--- a/include/linux/clk-private.h
+++ b/include/linux/clk-private.h
@@ -33,8 +33,11 @@ struct clk {
const char **parent_names;
struct clk **parents;
u8 num_parents;
+ u8 new_parent_index;
unsigned long rate;
unsigned long new_rate;
+ struct clk *new_parent;
+ struct clk *new_child;
unsigned long flags;
unsigned int enable_count;
unsigned int prepare_count;
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 1ec14a732176..7e59253b8603 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -12,6 +12,7 @@
#define __LINUX_CLK_PROVIDER_H
#include <linux/clk.h>
+#include <linux/io.h>
#ifdef CONFIG_COMMON_CLK
@@ -27,6 +28,7 @@
#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */
#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
+#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
struct clk_hw;
@@ -79,6 +81,10 @@ struct clk_hw;
* @round_rate: Given a target rate as input, returns the closest rate actually
* supported by the clock.
*
+ * @determine_rate: Given a target rate as input, returns the closest rate
+ * actually supported by the clock, and optionally the parent clock
+ * that should be used to provide the clock rate.
+ *
* @get_parent: Queries the hardware to determine the parent of a clock. The
* return value is a u8 which specifies the index corresponding to
* the parent clock. This index can be applied to either the
@@ -126,6 +132,9 @@ struct clk_ops {
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long,
unsigned long *);
+ long (*determine_rate)(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_clk);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
int (*set_rate)(struct clk_hw *hw, unsigned long,
@@ -327,8 +336,10 @@ struct clk_mux {
#define CLK_MUX_INDEX_ONE BIT(0)
#define CLK_MUX_INDEX_BIT BIT(1)
#define CLK_MUX_HIWORD_MASK BIT(2)
+#define CLK_MUX_READ_ONLY BIT(3) /* mux setting cannot be changed */
extern const struct clk_ops clk_mux_ops;
+extern const struct clk_ops clk_mux_ro_ops;
struct clk *clk_register_mux(struct device *dev, const char *name,
const char **parent_names, u8 num_parents, unsigned long flags,
@@ -418,6 +429,7 @@ const char *__clk_get_name(struct clk *clk);
struct clk_hw *__clk_get_hw(struct clk *clk);
u8 __clk_get_num_parents(struct clk *clk);
struct clk *__clk_get_parent(struct clk *clk);
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
unsigned int __clk_get_enable_count(struct clk *clk);
unsigned int __clk_get_prepare_count(struct clk *clk);
unsigned long __clk_get_rate(struct clk *clk);
@@ -425,6 +437,9 @@ unsigned long __clk_get_flags(struct clk *clk);
bool __clk_is_prepared(struct clk *clk);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate,
+ struct clk **best_parent_p);
/*
* FIXME clock api without lock protection
@@ -457,6 +472,7 @@ void of_clk_del_provider(struct device_node *np);
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
+int of_clk_get_parent_count(struct device_node *np);
const char *of_clk_get_parent_name(struct device_node *np, int index);
void of_clk_init(const struct of_device_id *matches);
@@ -490,5 +506,21 @@ static inline const char *of_clk_get_parent_name(struct device_node *np,
#define of_clk_init(matches) \
{ while (0); }
#endif /* CONFIG_OF */
+
+/*
+ * wrap access to peripherals in accessor routines
+ * for improved portability across platforms
+ */
+
+static inline u32 clk_readl(u32 __iomem *reg)
+{
+ return readl(reg);
+}
+
+static inline void clk_writel(u32 val, u32 __iomem *reg)
+{
+ writel(val, reg);
+}
+
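/*
 * Illustrative sketch, not part of the patch: how a simple clock driver
 * could use the clk_readl()/clk_writel() accessors above.  The register
 * layout and the my_gate structure are hypothetical.
 */
#include <linux/bitops.h>
#include <linux/clk-provider.h>

struct my_gate {
	struct clk_hw hw;
	u32 __iomem *reg;	/* hypothetical enable register */
	u8 bit_idx;
};

static int my_gate_enable(struct clk_hw *hw)
{
	struct my_gate *gate = container_of(hw, struct my_gate, hw);
	u32 val = clk_readl(gate->reg);

	clk_writel(val | BIT(gate->bit_idx), gate->reg);
	return 0;
}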
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk/mxs.h b/include/linux/clk/mxs.h
index 90c30dc3efc7..5138a90e018c 100644
--- a/include/linux/clk/mxs.h
+++ b/include/linux/clk/mxs.h
@@ -9,8 +9,6 @@
#ifndef __LINUX_CLK_MXS_H
#define __LINUX_CLK_MXS_H
-int mx23_clocks_init(void);
-int mx28_clocks_init(void);
int mxs_saif_clkmux_select(unsigned int clkmux);
#endif
diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h
deleted file mode 100644
index e074fdd5a236..000000000000
--- a/include/linux/clk/sunxi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2012 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_CLK_SUNXI_H_
-#define __LINUX_CLK_SUNXI_H_
-
-void __init sunxi_init_clocks(void);
-
-#endif
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 0857922e8ad0..493aa021c7a9 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -60,6 +60,7 @@ enum clock_event_mode {
* Core shall set the interrupt affinity dynamically in broadcast mode
*/
#define CLOCK_EVT_FEAT_DYNIRQ 0x000020
+#define CLOCK_EVT_FEAT_PERCPU 0x000040
/**
* struct clock_event_device - clock event device descriptor
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index dbbf8aa7731b..67301a405712 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -292,6 +292,8 @@ extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
+extern u64
+clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask);
extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
new file mode 100644
index 000000000000..a0f9280421ec
--- /dev/null
+++ b/include/linux/cmdline-parser.h
@@ -0,0 +1,45 @@
+/*
+ * Parse the command line to get partition information.
+ *
+ * Written by Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ */
+#ifndef CMDLINEPARSEH
+#define CMDLINEPARSEH
+
+#include <linux/blkdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+
+/* partition flags */
+#define PF_RDONLY 0x01 /* Device is read only */
+#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */
+
+struct cmdline_subpart {
+ char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
+ sector_t from;
+ sector_t size;
+ int flags;
+ struct cmdline_subpart *next_subpart;
+};
+
+struct cmdline_parts {
+ char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
+ unsigned int nr_subparts;
+ struct cmdline_subpart *subpart;
+ struct cmdline_parts *next_parts;
+};
+
+void cmdline_parts_free(struct cmdline_parts **parts);
+
+int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
+
+struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+ const char *bdev);
+
+void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+ int slot,
+ int (*add_part)(int, struct cmdline_subpart *, void *),
+ void *param);
+
+#endif /* CMDLINEPARSEH */
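/*
 * Illustrative sketch, not part of the patch: using the interface above to
 * parse a "blkdevparts="-style string for one block device.  The cmdline
 * string, the "mmcblk0" name and my_add_part() are hypothetical.
 */
#include <linux/cmdline-parser.h>

static int my_add_part(int slot, struct cmdline_subpart *subpart, void *param)
{
	pr_info("slot %d: %s at %llu, %llu sectors\n", slot, subpart->name,
		(unsigned long long)subpart->from,
		(unsigned long long)subpart->size);
	return 0;
}

static void my_setup_parts(const char *cmdline, sector_t disk_size)
{
	struct cmdline_parts *parts, *bdev_parts;

	if (cmdline_parts_parse(&parts, cmdline))
		return;

	bdev_parts = cmdline_parts_find(parts, "mmcblk0");
	if (bdev_parts)
		cmdline_parts_set(bdev_parts, disk_size, 1, my_add_part, NULL);

	cmdline_parts_free(&parts);
}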
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ec1aee4aec9c..eb8a49d75ab3 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -41,13 +41,14 @@
COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
- asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\
+ __attribute__((alias(__stringify(compat_SyS##name)))); \
static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
{ \
return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
} \
- SYSCALL_ALIAS(compat_sys##name, compat_SyS##name); \
static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#ifndef compat_user_stack_pointer
@@ -361,7 +362,7 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from);
+int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 842de225055f..ded429966c1f 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -65,6 +65,21 @@
#define __visible __attribute__((externally_visible))
#endif
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work around it via a compiler barrier quirk suggested by Jakub Jelinek.
+ * Fixed in GCC 4.8.2 and later versions.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#if GCC_VERSION <= 40801
+# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+#else
+# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
+#endif
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 973ce10c40b6..dc1bd3dcf11f 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -28,8 +28,6 @@
#endif
-#define uninitialized_var(x) x
-
#ifndef __HAVE_BUILTIN_BSWAP16__
/* icc has this, but it's called _bswap16 */
#define __HAVE_BUILTIN_BSWAP16__
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 3cd574d5b19e..5d5aaae3af43 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -5,7 +5,7 @@
* (C) Copyright 2001 Linus Torvalds
*
* Atomic wait-for-completion handler data structures.
- * See kernel/sched/core.c for details.
+ * See kernel/sched/completion.c for details.
*/
#include <linux/wait.h>
@@ -19,8 +19,8 @@
*
* See also: complete(), wait_for_completion() (and friends _timeout,
* _interruptible, _interruptible_timeout, and _killable), init_completion(),
- * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and
- * INIT_COMPLETION().
+ * reinit_completion(), and macros DECLARE_COMPLETION(),
+ * DECLARE_COMPLETION_ONSTACK().
*/
struct completion {
unsigned int done;
@@ -65,7 +65,7 @@ struct completion {
/**
* init_completion - Initialize a dynamically allocated completion
- * @x: completion structure that is to be initialized
+ * @x: pointer to completion structure that is to be initialized
*
* This inline function will initialize a dynamically created completion
* structure.
@@ -76,6 +76,18 @@ static inline void init_completion(struct completion *x)
init_waitqueue_head(&x->wait);
}
+/**
+ * reinit_completion - reinitialize a completion structure
+ * @x: pointer to completion structure that is to be reinitialized
+ *
+ * This inline function should be used to reinitialize a completion structure so it can
+ * be reused. This is especially important after complete_all() is used.
+ */
+static inline void reinit_completion(struct completion *x)
+{
+ x->done = 0;
+}
+
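/*
 * Illustrative sketch, not part of the patch: reusing one completion across
 * several waits with reinit_completion(), which replaces the removed
 * INIT_COMPLETION() macro.  my_start_transfer() is a hypothetical helper
 * whose completion path eventually calls complete(done).
 */
static void my_start_transfer(struct completion *done);	/* hypothetical */

static void my_run_transfers(struct completion *done, int rounds)
{
	int i;

	for (i = 0; i < rounds; i++) {
		reinit_completion(done);	/* was INIT_COMPLETION(*done) */
		my_start_transfer(done);
		wait_for_completion(done);
	}
}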
extern void wait_for_completion(struct completion *);
extern void wait_for_completion_io(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
@@ -94,14 +106,4 @@ extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
extern void complete_all(struct completion *);
-/**
- * INIT_COMPLETION - reinitialize a completion structure
- * @x: completion structure to be reinitialized
- *
- * This macro should be used to reinitialize a completion structure so it can
- * be reused. This is especially important after complete_all() is used.
- */
-#define INIT_COMPLETION(x) ((x).done = 0)
-
-
#endif
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index a98f1ca60407..d016a121a8c4 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -10,12 +10,14 @@
* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
*/
-extern int dump_write(struct file *file, const void *addr, int nr);
-extern int dump_seek(struct file *file, loff_t off);
+struct coredump_params;
+extern int dump_skip(struct coredump_params *cprm, size_t nr);
+extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
+extern int dump_align(struct coredump_params *cprm, int align);
#ifdef CONFIG_COREDUMP
-extern void do_coredump(siginfo_t *siginfo);
+extern void do_coredump(const siginfo_t *siginfo);
#else
-static inline void do_coredump(siginfo_t *siginfo) {}
+static inline void do_coredump(const siginfo_t *siginfo) {}
#endif
#endif /* _LINUX_COREDUMP_H */
diff --git a/include/linux/cper.h b/include/linux/cper.h
index c23049496531..2fc0ec3d89cc 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -218,8 +218,8 @@ enum {
#define CPER_PROC_VALID_IP 0x1000
#define CPER_MEM_VALID_ERROR_STATUS 0x0001
-#define CPER_MEM_VALID_PHYSICAL_ADDRESS 0x0002
-#define CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK 0x0004
+#define CPER_MEM_VALID_PA 0x0002
+#define CPER_MEM_VALID_PA_MASK 0x0004
#define CPER_MEM_VALID_NODE 0x0008
#define CPER_MEM_VALID_CARD 0x0010
#define CPER_MEM_VALID_MODULE 0x0020
@@ -232,6 +232,9 @@ enum {
#define CPER_MEM_VALID_RESPONDER_ID 0x1000
#define CPER_MEM_VALID_TARGET_ID 0x2000
#define CPER_MEM_VALID_ERROR_TYPE 0x4000
+#define CPER_MEM_VALID_RANK_NUMBER 0x8000
+#define CPER_MEM_VALID_CARD_HANDLE 0x10000
+#define CPER_MEM_VALID_MODULE_HANDLE 0x20000
#define CPER_PCIE_VALID_PORT_TYPE 0x0001
#define CPER_PCIE_VALID_VERSION 0x0002
@@ -347,6 +350,10 @@ struct cper_sec_mem_err {
__u64 responder_id;
__u64 target_id;
__u8 error_type;
+ __u8 reserved;
+ __u16 rank;
+ __u16 mem_array_handle; /* card handle in UEFI 2.4 */
+ __u16 mem_dev_handle; /* module handle in UEFI 2.4 */
};
struct cper_sec_pcie {
@@ -389,6 +396,6 @@ struct cper_sec_pcie {
u64 cper_next_record_id(void);
void cper_print_bits(const char *prefix, unsigned int bits,
- const char *strs[], unsigned int strs_size);
+ const char * const strs[], unsigned int strs_size);
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 801ff9e73679..03e235ad1bba 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -18,6 +18,7 @@
#include <linux/cpumask.h>
struct device;
+struct device_node;
struct cpu {
int node_id; /* The node which contains the CPU */
@@ -29,6 +30,8 @@ extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
+extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
+ int cpu, unsigned int *thread);
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);
@@ -185,19 +188,6 @@ extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-extern void cpu_hotplug_driver_lock(void);
-extern void cpu_hotplug_driver_unlock(void);
-#else
-static inline void cpu_hotplug_driver_lock(void)
-{
-}
-
-static inline void cpu_hotplug_driver_unlock(void)
-{
-}
-#endif
-
#else /* CONFIG_HOTPLUG_CPU */
static inline void cpu_hotplug_begin(void) {}
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index 1739510d8994..bdd18caa6c94 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -52,8 +52,6 @@ static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
return rmap->obj[rmap->near[cpu].index];
}
-#ifdef CONFIG_GENERIC_HARDIRQS
-
/**
* alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
* @size: Number of objects to be mapped
@@ -68,5 +66,4 @@ extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
-#endif
#endif /* __LINUX_CPU_RMAP_H */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index d568f3975eeb..dc196bbcf227 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -85,7 +85,20 @@ struct cpufreq_policy {
struct list_head policy_list;
struct kobject kobj;
struct completion kobj_unregister;
- int transition_ongoing; /* Tracks transition status */
+
+ /*
+ * The rules for this semaphore:
+ * - Any routine that wants to read from the policy structure will
+ * do a down_read on this semaphore.
+ * - Any routine that will write to the policy structure and/or may take away
+ * the policy altogether (e.g. CPU hotplug) must hold this lock in write
+ * mode before doing so.
+ *
+ * Additional rules:
+ * - Lock should not be held across
+ * __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+ */
+ struct rw_semaphore rwsem;
};
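/*
 * Illustrative sketch, not part of the patch: the read-side rule above in
 * code form - anything that only inspects the policy takes rwsem in read
 * mode.  read_policy_cur() is a hypothetical helper.
 */
static unsigned int read_policy_cur(struct cpufreq_policy *policy)
{
	unsigned int cur;

	down_read(&policy->rwsem);
	cur = policy->cur;
	up_read(&policy->rwsem);

	return cur;
}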
/* Only for ACPI */
@@ -94,8 +107,16 @@ struct cpufreq_policy {
#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
+#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
+#else
+static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+ return NULL;
+}
+static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
+#endif
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
@@ -181,13 +202,6 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_driver {
char name[CPUFREQ_NAME_LEN];
u8 flags;
- /*
- * This should be set by platforms having multiple clock-domains, i.e.
- * supporting multiple policies. With this sysfs directories of governor
- * would be created in cpu/cpu<num>/cpufreq/ directory and so they can
- * use the same governor with different tunables for different clusters.
- */
- bool have_governor_per_policy;
/* needed by all drivers */
int (*init) (struct cpufreq_policy *policy);
@@ -195,9 +209,11 @@ struct cpufreq_driver {
/* define one out of two */
int (*setpolicy) (struct cpufreq_policy *policy);
- int (*target) (struct cpufreq_policy *policy,
+ int (*target) (struct cpufreq_policy *policy, /* Deprecated */
unsigned int target_freq,
unsigned int relation);
+ int (*target_index) (struct cpufreq_policy *policy,
+ unsigned int index);
/* should be defined, if possible */
unsigned int (*get) (unsigned int cpu);
@@ -212,13 +228,29 @@ struct cpufreq_driver {
};
/* flags */
-#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
- * all ->init() calls failed */
-#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
- * "constants" aren't affected by
- * frequency transitions */
-#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
- * mismatches */
+#define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
+ all ->init() calls failed */
+#define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other
+ kernel "constants" aren't
+ affected by frequency
+ transitions */
+#define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume
+ speed mismatches */
+
+/*
+ * This should be set by platforms having multiple clock-domains, i.e.
+ * supporting multiple policies. With this, sysfs directories for the governor
+ * are created under cpu/cpu<num>/cpufreq/ so that each cluster can use the
+ * same governor with different tunables.
+ */
+#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
+
+/*
+ * Drivers that issue POSTCHANGE notifications from outside their ->target()
+ * routine must set this flag in cpufreq_driver->flags so that the core
+ * can handle them specially.
+ */
+#define CPUFREQ_ASYNC_NOTIFICATION (1 << 4)
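/*
 * Illustrative sketch, not part of the patch: a driver selecting the new
 * flag-based behaviour instead of the removed have_governor_per_policy
 * field.  The my_* callbacks are hypothetical.
 */
static struct cpufreq_driver my_cpufreq_driver = {
	.name		= "my-cpufreq",
	.flags		= CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
	.init		= my_cpufreq_init,
	.target_index	= my_cpufreq_target_index,
	.get		= my_cpufreq_get,
	.exit		= cpufreq_generic_exit,
};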
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
@@ -241,6 +273,13 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
return;
}
+static inline void
+cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
+{
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+}
+
/*********************************************************************
* CPUFREQ NOTIFIER INTERFACE *
*********************************************************************/
@@ -393,6 +432,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
@@ -408,8 +448,20 @@ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr *cpufreq_generic_attr[];
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu);
void cpufreq_frequency_table_put_attr(unsigned int cpu);
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table);
+
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ unsigned int transition_latency);
+static inline int cpufreq_generic_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
#endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 781addc66f03..50fcbb0ac4e7 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -114,7 +114,7 @@ struct cpuidle_driver {
int safe_state_index;
/* the driver handles the cpus in cpumask */
- struct cpumask *cpumask;
+ struct cpumask *cpumask;
};
#ifdef CONFIG_CPU_IDLE
@@ -195,16 +195,10 @@ struct cpuidle_governor {
};
#ifdef CONFIG_CPU_IDLE
-
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
-
#else
-
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
-static inline void cpuidle_unregister_governor(struct cpuidle_governor *gov) { }
-
#endif
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index cc1b01cf2035..3fe661fe96d1 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -110,10 +110,14 @@ static inline bool put_mems_allowed(unsigned int seq)
static inline void set_mems_allowed(nodemask_t nodemask)
{
+ unsigned long flags;
+
task_lock(current);
+ local_irq_save(flags);
write_seqcount_begin(&current->mems_allowed_seq);
current->mems_allowed = nodemask;
write_seqcount_end(&current->mems_allowed_seq);
+ local_irq_restore(flags);
task_unlock(current);
}
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 37e4f8da7cdf..fe68a5a98583 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -12,6 +12,15 @@
extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;
+extern int __weak elfcorehdr_alloc(unsigned long long *addr,
+ unsigned long long *size);
+extern void __weak elfcorehdr_free(unsigned long long addr);
+extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
+extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+ unsigned long from, unsigned long pfn,
+ unsigned long size, pgprot_t prot);
+
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
unsigned long, int);
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index a9c96d865ee7..b3cb71f0d3b0 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -3,6 +3,10 @@
#include <linux/types.h>
+#define CRC_T10DIF_DIGEST_SIZE 2
+#define CRC_T10DIF_BLOCK_SIZE 1
+
+__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len);
__u16 crc_t10dif(unsigned char const *, size_t);
#endif
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
index 68267b64bb98..7d275c4fc011 100644
--- a/include/linux/crc32.h
+++ b/include/linux/crc32.h
@@ -11,8 +11,48 @@
extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len);
extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len);
+/**
+ * crc32_le_combine - Combine two crc32 check values into one. For two
+ * sequences of bytes, seq1 and seq2 with lengths len1
+ * and len2, crc32_le() check values were calculated
+ * for each, crc1 and crc2.
+ *
+ * @crc1: crc32 of the first block
+ * @crc2: crc32 of the second block
+ * @len2: length of the second block
+ *
+ * Return: The crc32_le() check value of seq1 and seq2 concatenated,
+ * requiring only crc1, crc2, and len2. Note: If seq_full denotes
+ * the concatenated memory area of seq1 with seq2, and crc_full
+ * the crc32_le() value of seq_full, then crc_full ==
+ * crc32_le_combine(crc1, crc2, len2) when crc_full was seeded
+ * with the same initializer as crc1, and crc2 seed was 0. See
+ * also crc32_combine_test().
+ */
+extern u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2);
+
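/*
 * Illustrative sketch, not part of the patch: combining per-fragment CRCs as
 * described above so the concatenated data never has to be re-scanned.  The
 * two fragments and the seed are hypothetical.
 */
static u32 crc_of_concatenation(const u8 *seq1, size_t len1,
				const u8 *seq2, size_t len2)
{
	u32 crc1 = crc32_le(~0, seq1, len1);	/* seeded normally */
	u32 crc2 = crc32_le(0, seq2, len2);	/* second part seeded with 0 */

	/* equals crc32_le(~0, <seq1 || seq2>, len1 + len2) */
	return crc32_le_combine(crc1, crc2, len2);
}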
extern u32 __crc32c_le(u32 crc, unsigned char const *p, size_t len);
+/**
+ * __crc32c_le_combine - Combine two crc32c check values into one. For two
+ * sequences of bytes, seq1 and seq2 with lengths len1
+ * and len2, __crc32c_le() check values were calculated
+ * for each, crc1 and crc2.
+ *
+ * @crc1: crc32c of the first block
+ * @crc2: crc32c of the second block
+ * @len2: length of the second block
+ *
+ * Return: The __crc32c_le() check value of seq1 and seq2 concatenated,
+ * requiring only crc1, crc2, and len2. Note: If seq_full denotes
+ * the concatenated memory area of seq1 with seq2, and crc_full
+ * the __crc32c_le() value of seq_full, then crc_full ==
+ * __crc32c_le_combine(crc1, crc2, len2) when crc_full was
+ * seeded with the same initializer as crc1, and crc2 seed
+ * was 0. See also crc32c_combine_test().
+ */
+extern u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2);
+
#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
/*
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 9169b91ea2d2..bf72e9ac6de0 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -29,8 +29,10 @@ struct vfsmount;
/* The hash is always the low bits of hash_len */
#ifdef __LITTLE_ENDIAN
#define HASH_LEN_DECLARE u32 hash; u32 len;
+ #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8))
#else
#define HASH_LEN_DECLARE u32 len; u32 hash;
+ #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8))
#endif
/*
@@ -55,11 +57,11 @@ struct qstr {
#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
struct dentry_stat_t {
- int nr_dentry;
- int nr_unused;
- int age_limit; /* age in seconds */
- int want_pages; /* pages requested by system */
- int dummy[2];
+ long nr_dentry;
+ long nr_unused;
+ long age_limit; /* age in seconds */
+ long want_pages; /* pages requested by system */
+ long dummy[2];
};
extern struct dentry_stat_t dentry_stat;
@@ -169,13 +171,13 @@ struct dentry_operations {
*/
/* d_flags entries */
-#define DCACHE_OP_HASH 0x0001
-#define DCACHE_OP_COMPARE 0x0002
-#define DCACHE_OP_REVALIDATE 0x0004
-#define DCACHE_OP_DELETE 0x0008
-#define DCACHE_OP_PRUNE 0x0010
+#define DCACHE_OP_HASH 0x00000001
+#define DCACHE_OP_COMPARE 0x00000002
+#define DCACHE_OP_REVALIDATE 0x00000004
+#define DCACHE_OP_DELETE 0x00000008
+#define DCACHE_OP_PRUNE 0x00000010
-#define DCACHE_DISCONNECTED 0x0020
+#define DCACHE_DISCONNECTED 0x00000020
/* This dentry is possibly not currently connected to the dcache tree, in
* which case its parent will either be itself, or will have this flag as
* well. nfsd will not use a dentry with this bit set, but will first
@@ -186,33 +188,42 @@ struct dentry_operations {
* dentry into place and return that dentry rather than the passed one,
* typically using d_splice_alias. */
-#define DCACHE_REFERENCED 0x0040 /* Recently used, don't discard. */
-#define DCACHE_RCUACCESS 0x0080 /* Entry has ever been RCU-visible */
+#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
+#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */
-#define DCACHE_CANT_MOUNT 0x0100
-#define DCACHE_GENOCIDE 0x0200
-#define DCACHE_SHRINK_LIST 0x0400
+#define DCACHE_CANT_MOUNT 0x00000100
+#define DCACHE_GENOCIDE 0x00000200
+#define DCACHE_SHRINK_LIST 0x00000400
-#define DCACHE_OP_WEAK_REVALIDATE 0x0800
+#define DCACHE_OP_WEAK_REVALIDATE 0x00000800
-#define DCACHE_NFSFS_RENAMED 0x1000
+#define DCACHE_NFSFS_RENAMED 0x00001000
/* this dentry has been "silly renamed" and has to be deleted on the last
* dput() */
-#define DCACHE_COOKIE 0x2000 /* For use by dcookie subsystem */
-#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x4000
+#define DCACHE_COOKIE 0x00002000 /* For use by dcookie subsystem */
+#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x00004000
/* Parent inode is watched by some fsnotify listener */
-#define DCACHE_MOUNTED 0x10000 /* is a mountpoint */
-#define DCACHE_NEED_AUTOMOUNT 0x20000 /* handle automount on this dir */
-#define DCACHE_MANAGE_TRANSIT 0x40000 /* manage transit from this dirent */
+#define DCACHE_DENTRY_KILLED 0x00008000
+
+#define DCACHE_MOUNTED 0x00010000 /* is a mountpoint */
+#define DCACHE_NEED_AUTOMOUNT 0x00020000 /* handle automount on this dir */
+#define DCACHE_MANAGE_TRANSIT 0x00040000 /* manage transit from this dirent */
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
-#define DCACHE_DENTRY_KILLED 0x100000
+#define DCACHE_LRU_LIST 0x00080000
+
+#define DCACHE_ENTRY_TYPE 0x00700000
+#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry */
+#define DCACHE_DIRECTORY_TYPE 0x00100000 /* Normal directory */
+#define DCACHE_AUTODIR_TYPE 0x00200000 /* Lookupless directory (presumed automount) */
+#define DCACHE_SYMLINK_TYPE 0x00300000 /* Symlink */
+#define DCACHE_FILE_TYPE 0x00400000 /* Other file type */
extern seqlock_t rename_lock;
-static inline int dname_external(struct dentry *dentry)
+static inline int dname_external(const struct dentry *dentry)
{
return dentry->d_name.name != dentry->d_iname;
}
@@ -223,6 +234,7 @@ static inline int dname_external(struct dentry *dentry)
extern void d_instantiate(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
+extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
@@ -253,6 +265,7 @@ extern void d_prune_aliases(struct inode *);
/* test whether we have any submounts in a subdir tree */
extern int have_submounts(struct dentry *);
+extern int check_submounts_and_drop(struct dentry *);
/*
* This adds the entry to the hash queues.
@@ -357,17 +370,17 @@ extern struct dentry *dget_parent(struct dentry *dentry);
* Returns true if the dentry passed is not currently hashed.
*/
-static inline int d_unhashed(struct dentry *dentry)
+static inline int d_unhashed(const struct dentry *dentry)
{
return hlist_bl_unhashed(&dentry->d_hash);
}
-static inline int d_unlinked(struct dentry *dentry)
+static inline int d_unlinked(const struct dentry *dentry)
{
return d_unhashed(dentry) && !IS_ROOT(dentry);
}
-static inline int cant_mount(struct dentry *dentry)
+static inline int cant_mount(const struct dentry *dentry)
{
return (dentry->d_flags & DCACHE_CANT_MOUNT);
}
@@ -381,16 +394,75 @@ static inline void dont_mount(struct dentry *dentry)
extern void dput(struct dentry *);
-static inline bool d_managed(struct dentry *dentry)
+static inline bool d_managed(const struct dentry *dentry)
{
return dentry->d_flags & DCACHE_MANAGED_DENTRY;
}
-static inline bool d_mountpoint(struct dentry *dentry)
+static inline bool d_mountpoint(const struct dentry *dentry)
{
return dentry->d_flags & DCACHE_MOUNTED;
}
+/*
+ * Directory cache entry type accessor functions.
+ */
+static inline void __d_set_type(struct dentry *dentry, unsigned type)
+{
+ dentry->d_flags = (dentry->d_flags & ~DCACHE_ENTRY_TYPE) | type;
+}
+
+static inline void __d_clear_type(struct dentry *dentry)
+{
+ __d_set_type(dentry, DCACHE_MISS_TYPE);
+}
+
+static inline void d_set_type(struct dentry *dentry, unsigned type)
+{
+ spin_lock(&dentry->d_lock);
+ __d_set_type(dentry, type);
+ spin_unlock(&dentry->d_lock);
+}
+
+static inline unsigned __d_entry_type(const struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_ENTRY_TYPE;
+}
+
+static inline bool d_is_directory(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
+}
+
+static inline bool d_is_autodir(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_AUTODIR_TYPE;
+}
+
+static inline bool d_is_symlink(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
+}
+
+static inline bool d_is_file(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_FILE_TYPE;
+}
+
+static inline bool d_is_negative(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+}
+
+static inline bool d_is_positive(const struct dentry *dentry)
+{
+ return !d_is_negative(dentry);
+}
+
extern int sysctl_vfs_cache_pressure;
+static inline unsigned long vfs_pressure_ratio(unsigned long val)
+{
+ return mult_frac(val, sysctl_vfs_cache_pressure, 100);
+}
#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 263489d0788d..4d0b4d1aa132 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -206,6 +206,12 @@ static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mod
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent,
u32 *value)
@@ -227,6 +233,12 @@ static inline struct dentry *debugfs_create_regset32(const char *name,
return ERR_PTR(-ENODEV);
}
+static inline int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
+ int nregs, void __iomem *base, char *prefix)
+{
+ return 0;
+}
+
static inline bool debugfs_initialized(void)
{
return false;
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 32acc0090ce6..d48dc00232a4 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -15,7 +15,7 @@
#include <linux/device.h>
#include <linux/notifier.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
#define DEVFREQ_NAME_LEN 16
@@ -187,7 +187,7 @@ extern int devfreq_suspend_device(struct devfreq *devfreq);
extern int devfreq_resume_device(struct devfreq *devfreq);
/* Helper functions for devfreq user device driver with OPP. */
-extern struct opp *devfreq_recommended_opp(struct device *dev,
+extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags);
extern int devfreq_register_opp_notifier(struct device *dev,
struct devfreq *devfreq);
@@ -238,7 +238,7 @@ static inline int devfreq_resume_device(struct devfreq *devfreq)
return 0;
}
-static inline struct opp *devfreq_recommended_opp(struct device *dev,
+static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags)
{
return ERR_PTR(-EINVAL);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index e151d4c9298d..ed419c62dde1 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -10,6 +10,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/math64.h>
#include <linux/ratelimit.h>
struct dm_dev;
@@ -405,13 +406,14 @@ int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
+
/*
* Geometry functions.
*/
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
-
/*-----------------------------------------------------------------
* Functions for manipulating device-mapper tables.
*---------------------------------------------------------------*/
@@ -550,6 +552,14 @@ extern struct ratelimit_state dm_ratelimit_state;
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
+#define dm_sector_div64(x, y)( \
+{ \
+ u64 _res; \
+ (x) = div64_u64_rem(x, y, &_res); \
+ _res; \
+} \
+)
+
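/*
 * Illustrative sketch, not part of the patch: dm_sector_div64() divides its
 * first argument in place and evaluates to the remainder, so both quotient
 * and remainder come out of a single call.  The values are arbitrary.
 */
static void dm_sector_div64_example(void)
{
	u64 nr_sectors = 1000003;
	u64 remainder;

	remainder = dm_sector_div64(nr_sectors, 4096);
	/* nr_sectors is now 244 (the quotient), remainder is 579 */
}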
/*
* Ceiling(n / sz)
*/
diff --git a/include/linux/device.h b/include/linux/device.h
index f46646e49235..952b01033c32 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -26,6 +26,7 @@
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uidgid.h>
+#include <linux/gfp.h>
#include <asm/device.h>
struct device;
@@ -63,9 +64,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @name: The name of the bus.
* @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
* @dev_root: Default device to use as the parent.
- * @bus_attrs: Default attributes of the bus.
* @dev_attrs: Default attributes of the devices on the bus.
- * @drv_attrs: Default attributes of the device drivers on the bus.
* @bus_groups: Default attributes of the bus.
* @dev_groups: Default attributes of the devices on the bus.
* @drv_groups: Default attributes of the device drivers on the bus.
@@ -106,9 +105,7 @@ struct bus_type {
const char *name;
const char *dev_name;
struct device *dev_root;
- struct bus_attribute *bus_attrs; /* use bus_groups instead */
struct device_attribute *dev_attrs; /* use dev_groups instead */
- struct driver_attribute *drv_attrs; /* use drv_groups instead */
const struct attribute_group **bus_groups;
const struct attribute_group **dev_groups;
const struct attribute_group **drv_groups;
@@ -329,8 +326,6 @@ int subsys_virtual_register(struct bus_type *subsys,
* @owner: The module owner.
* @class_attrs: Default attributes of this class.
* @dev_groups: Default attributes of the devices that belong to the class.
- * @dev_attrs: Default attributes of the devices belong to the class.
- * @dev_bin_attrs: Default binary attributes of the devices belong to the class.
* @dev_kobj: The kobject that represents this class and links it into the hierarchy.
* @dev_uevent: Called when a device is added, removed from this class, or a
* few other things that generate uevents to add the environment
@@ -358,9 +353,7 @@ struct class {
struct module *owner;
struct class_attribute *class_attrs;
- struct device_attribute *dev_attrs; /* use dev_groups instead */
const struct attribute_group **dev_groups;
- struct bin_attribute *dev_bin_attrs;
struct kobject *dev_kobj;
int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
@@ -427,8 +420,6 @@ struct class_attribute {
char *buf);
ssize_t (*store)(struct class *class, struct class_attribute *attr,
const char *buf, size_t count);
- const void *(*namespace)(struct class *class,
- const struct class_attribute *attr);
};
#define CLASS_ATTR(_name, _mode, _show, _store) \
@@ -438,10 +429,24 @@ struct class_attribute {
#define CLASS_ATTR_RO(_name) \
struct class_attribute class_attr_##_name = __ATTR_RO(_name)
-extern int __must_check class_create_file(struct class *class,
- const struct class_attribute *attr);
-extern void class_remove_file(struct class *class,
- const struct class_attribute *attr);
+extern int __must_check class_create_file_ns(struct class *class,
+ const struct class_attribute *attr,
+ const void *ns);
+extern void class_remove_file_ns(struct class *class,
+ const struct class_attribute *attr,
+ const void *ns);
+
+static inline int __must_check class_create_file(struct class *class,
+ const struct class_attribute *attr)
+{
+ return class_create_file_ns(class, attr, NULL);
+}
+
+static inline void class_remove_file(struct class *class,
+ const struct class_attribute *attr)
+{
+ return class_remove_file_ns(class, attr, NULL);
+}
/* Simple class attribute that is just a static string */
struct class_attribute_string {
@@ -602,8 +607,24 @@ extern void devres_close_group(struct device *dev, void *id);
extern void devres_remove_group(struct device *dev, void *id);
extern int devres_release_group(struct device *dev, void *id);
-/* managed kzalloc/kfree for device drivers, no kmalloc, always use kzalloc */
-extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
+/* managed devm_k.alloc/kfree for device drivers */
+extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
+static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+ return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
+}
+static inline void *devm_kmalloc_array(struct device *dev,
+ size_t n, size_t size, gfp_t flags)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+ return devm_kmalloc(dev, n * size, flags);
+}
+static inline void *devm_kcalloc(struct device *dev,
+ size_t n, size_t size, gfp_t flags)
+{
+ return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
+}
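/*
 * Illustrative sketch, not part of the patch: device-managed allocation in a
 * probe path.  struct my_priv, the register count and my_probe() are
 * hypothetical; everything is freed automatically on unbind.
 */
struct my_priv {
	u32 *shadow_regs;
	unsigned int nregs;
};

static int my_probe(struct device *dev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->nregs = 16;
	priv->shadow_regs = devm_kcalloc(dev, priv->nregs,
					 sizeof(*priv->shadow_regs), GFP_KERNEL);
	if (!priv->shadow_regs)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	return 0;
}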
extern void devm_kfree(struct device *dev, void *p);
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
@@ -623,9 +644,11 @@ struct device_dma_parameters {
unsigned long segment_boundary_mask;
};
+struct acpi_device;
+
struct acpi_dev_node {
#ifdef CONFIG_ACPI
- void *handle;
+ struct acpi_device *companion;
#endif
};
@@ -737,7 +760,7 @@ struct device {
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
override */
-#ifdef CONFIG_CMA
+#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
@@ -769,14 +792,6 @@ static inline struct device *kobj_to_dev(struct kobject *kobj)
return container_of(kobj, struct device, kobj);
}
-#ifdef CONFIG_ACPI
-#define ACPI_HANDLE(dev) ((dev)->acpi_node.handle)
-#define ACPI_HANDLE_SET(dev, _handle_) (dev)->acpi_node.handle = (_handle_)
-#else
-#define ACPI_HANDLE(dev) (NULL)
-#define ACPI_HANDLE_SET(dev, _handle_) do { } while (0)
-#endif
-
/* Get the wakeup routines, which depend on struct device */
#include <linux/pm_wakeup.h>
@@ -1149,16 +1164,15 @@ do { \
#endif
/*
- * dev_WARN*() acts like dev_printk(), but with the key difference
- * of using a WARN/WARN_ON to get the message out, including the
- * file/line information and a backtrace.
+ * dev_WARN*() acts like dev_printk(), but with the key difference of
+ * using WARN/WARN_ONCE to include file/line information and a backtrace.
*/
#define dev_WARN(dev, format, arg...) \
- WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg);
+ WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
#define dev_WARN_ONCE(dev, condition, format, arg...) \
- WARN_ONCE(condition, "Device %s\n" format, \
- dev_driver_string(dev), ## arg)
+ WARN_ONCE(condition, "%s %s: " format, \
+ dev_driver_string(dev), dev_name(dev), ## arg)
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 00141d3325fe..3b28f937d959 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -67,9 +67,53 @@ struct device;
extern struct cma *dma_contiguous_default_area;
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ if (dev && dev->cma_area)
+ return dev->cma_area;
+ return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+ if (dev)
+ dev->cma_area = cma;
+}
+
+static inline void dma_contiguous_set_default(struct cma *cma)
+{
+ dma_contiguous_default_area = cma;
+}
+
void dma_contiguous_reserve(phys_addr_t addr_limit);
-int dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit);
+
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ * for particular device
+ * @dev: Pointer to device structure.
+ * @size: Size of the reserved memory.
+ * @base: Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+
+static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ struct cma *cma;
+ int ret;
+ ret = dma_contiguous_reserve_area(size, base, limit, &cma);
+ if (ret == 0)
+ dev_set_cma_area(dev, cma);
+
+ return ret;
+}
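/*
 * Illustrative sketch, not part of the patch: early board code giving one
 * device a dedicated CMA region via the wrapper above.  my_camera_device
 * and the 16 MiB size are hypothetical.
 */
#include <linux/platform_device.h>
#include <linux/sizes.h>

static struct platform_device my_camera_device;	/* hypothetical */

static void __init my_board_reserve(void)
{
	/* 16 MiB anywhere in memory, reserved for my_camera_device only */
	dma_declare_contiguous(&my_camera_device.dev, SZ_16M, 0, 0);
}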
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
unsigned int order);
@@ -80,8 +124,22 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
#define MAX_CMA_AREAS (0)
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
+
+static inline void dma_contiguous_set_default(struct cma *cma) { }
+
static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma) {
+ return -ENOSYS;
+}
+
static inline
int dma_declare_contiguous(struct device *dev, phys_addr_t size,
phys_addr_t base, phys_addr_t limit)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 3a8d0a2af607..fd4aee29ad10 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -97,6 +97,30 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
}
#endif
+/*
+ * Set both the DMA mask and the coherent DMA mask to the same thing.
+ * Note that we don't check the return value from dma_set_coherent_mask()
+ * as the DMA API guarantees that the coherent DMA mask can be set to
+ * the same or smaller than the streaming DMA mask.
+ */
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+ int rc = dma_set_mask(dev, mask);
+ if (rc == 0)
+ dma_set_coherent_mask(dev, mask);
+ return rc;
+}
+
+/*
+ * Similar to the above, except it deals with the case where the device
+ * does not have dev->dma_mask appropriately setup.
+ */
+static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
+{
+ dev->dma_mask = &dev->coherent_dma_mask;
+ return dma_set_mask_and_coherent(dev, mask);
+}
+
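/*
 * Illustrative sketch, not part of the patch: the probe-time pattern the
 * helpers above encapsulate - try a 64-bit streaming+coherent mask first and
 * fall back to 32-bit.  my_dma_setup() is a hypothetical helper.
 */
static int my_dma_setup(struct device *dev)
{
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;

	return 0;
}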
extern u64 dma_get_required_mask(struct device *dev);
static inline unsigned int dma_get_max_seg_size(struct device *dev)
@@ -129,6 +153,13 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
+#ifndef dma_max_pfn
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+ return *dev->dma_mask >> PAGE_SHIFT;
+}
+#endif
+
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h
new file mode 100644
index 000000000000..2dc9b2bc18fc
--- /dev/null
+++ b/include/linux/dma/mmp-pdma.h
@@ -0,0 +1,15 @@
+#ifndef _MMP_PDMA_H_
+#define _MMP_PDMA_H_
+
+struct dma_chan;
+
+#ifdef CONFIG_MMP_PDMA
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
+#else
+static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ return false;
+}
+#endif
+
+#endif /* _MMP_PDMA_H_ */
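A sketch of requesting an MMP PDMA channel with the new filter helper; the request-line value is illustrative, and the filter expects a pointer to the peripheral's DRCMR number as its parameter:

    unsigned int drcmr = 24;        /* peripheral request line, illustrative */
    dma_cap_mask_t mask;
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);
    chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);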
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index cb286b1acdb6..41cf0c399288 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -38,17 +38,20 @@ typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE 1
#define DMA_MAX_COOKIE INT_MAX
-#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
+static inline int dma_submit_error(dma_cookie_t cookie)
+{
+ return cookie < 0 ? cookie : 0;
+}
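Since dma_submit_error() now returns the negative cookie itself rather than a bare flag, submission paths can propagate the error directly; a sketch, assuming desc, chan and dev are already set up:

    cookie = dmaengine_submit(desc);
    ret = dma_submit_error(cookie);
    if (ret) {
            dev_err(dev, "DMA submit failed: %d\n", ret);
            return ret;
    }
    dma_async_issue_pending(chan);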
/**
* enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
* @DMA_IN_PROGRESS: transaction not yet processed
* @DMA_PAUSED: transaction is paused
* @DMA_ERROR: transaction failed
*/
enum dma_status {
- DMA_SUCCESS,
+ DMA_COMPLETE,
DMA_IN_PROGRESS,
DMA_PAUSED,
DMA_ERROR,
@@ -168,12 +171,6 @@ struct dma_interleaved_template {
* @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
 * acknowledges receipt, i.e. has a chance to establish any dependency
* chains
- * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
- * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
- * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
- * (if not set, do the source dma-unmapping as page)
- * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
- * (if not set, do the destination dma-unmapping as page)
* @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
* @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
* @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@@ -185,14 +182,10 @@ struct dma_interleaved_template {
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
DMA_CTRL_ACK = (1 << 1),
- DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
- DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
- DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
- DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
- DMA_PREP_PQ_DISABLE_P = (1 << 6),
- DMA_PREP_PQ_DISABLE_Q = (1 << 7),
- DMA_PREP_CONTINUE = (1 << 8),
- DMA_PREP_FENCE = (1 << 9),
+ DMA_PREP_PQ_DISABLE_P = (1 << 2),
+ DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+ DMA_PREP_CONTINUE = (1 << 4),
+ DMA_PREP_FENCE = (1 << 5),
};
/**
@@ -370,6 +363,25 @@ struct dma_slave_config {
unsigned int slave_id;
};
+/* struct dma_slave_caps - expose capabilities of a slave channel only
+ *
+ * @src_addr_widths: bit mask of src addr widths the channel supports
+ * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+ * @directions: bit mask of slave directions the channel supports.
+ *	Since enum dma_transfer_direction is not defined as a bit mask, the
+ *	dma controller should fill (1 << <TYPE>) for each supported direction,
+ *	and clients should test the mask the same way
+ * @cmd_pause: true, if pause (and thereby resume) is supported
+ * @cmd_terminate: true, if the terminate command is supported
+ */
+struct dma_slave_caps {
+ u32 src_addr_widths;
+ u32 dstn_addr_widths;
+ u32 directions;
+ bool cmd_pause;
+ bool cmd_terminate;
+};
+
static inline const char *dma_chan_name(struct dma_chan *chan)
{
return dev_name(&chan->dev->device);
@@ -391,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref);
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+ u8 to_cnt;
+ u8 from_cnt;
+ u8 bidi_cnt;
+ struct device *dev;
+ struct kref kref;
+ size_t len;
+ dma_addr_t addr[0];
+};
+
/**
* struct dma_async_tx_descriptor - async transaction descriptor
* ---dma generic offload fields---
@@ -416,6 +439,7 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
+ struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
@@ -423,6 +447,40 @@ struct dma_async_tx_descriptor {
#endif
};
+#ifdef CONFIG_DMA_ENGINE
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+ kref_get(&unmap->kref);
+ tx->unmap = unmap;
+}
+
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+ return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+ if (tx->unmap) {
+ dmaengine_unmap_put(tx->unmap);
+ tx->unmap = NULL;
+ }
+}
+
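A condensed sketch of how an offload client attaches unmap data to a descriptor with the new helpers (error handling trimmed); src, dest, offsets, len, chan and flags are assumed to be set up by the caller:

    struct dmaengine_unmap_data *unmap;

    unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
    if (unmap) {
            unmap->to_cnt = 1;
            unmap->from_cnt = 1;
            unmap->len = len;
            unmap->addr[0] = dma_map_page(device->dev, src, src_off, len,
                                          DMA_TO_DEVICE);
            unmap->addr[1] = dma_map_page(device->dev, dest, dest_off, len,
                                          DMA_FROM_DEVICE);

            tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
                                                unmap->addr[0], len, flags);
            if (tx)
                    dma_set_unmap(tx, unmap);   /* takes a kref on unmap */
            dmaengine_unmap_put(unmap);         /* drop the local reference */
    }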
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
@@ -532,6 +590,7 @@ struct dma_tx_state {
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
+ * @device_slave_caps: return the slave channel capabilities
*/
struct dma_device {
@@ -597,6 +656,7 @@ struct dma_device {
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
+ int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
};
static inline int dmaengine_device_control(struct dma_chan *chan,
@@ -670,6 +730,21 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+{
+ if (!chan || !caps)
+ return -EINVAL;
+
+ /* check if the channel supports slave transactions */
+ if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
+ return -ENXIO;
+
+ if (chan->device->device_slave_caps)
+ return chan->device->device_slave_caps(chan, caps);
+
+ return -ENXIO;
+}
+
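A sketch of a client querying the new capability interface before configuring a channel; chan and dev are assumed to exist already:

    struct dma_slave_caps caps;
    int ret;

    ret = dma_get_slave_caps(chan, &caps);
    if (!ret && !(caps.directions & BIT(DMA_DEV_TO_MEM)))
            dev_warn(dev, "channel cannot do device-to-memory transfers\n");
    if (!ret && !caps.cmd_pause)
            dev_dbg(dev, "pause/resume not supported on this channel\n");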
static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
@@ -940,10 +1015,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
{
if (last_complete <= last_used) {
if ((cookie <= last_complete) || (cookie > last_used))
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
} else {
if ((cookie <= last_complete) && (cookie > last_used))
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
}
return DMA_IN_PROGRESS;
}
@@ -958,8 +1033,9 @@ dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used,
}
}
-enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
#ifdef CONFIG_DMA_ENGINE
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void dma_issue_pending_all(void);
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
@@ -967,9 +1043,17 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
#else
+static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+{
+ return NULL;
+}
+static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+ return DMA_COMPLETE;
+}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
- return DMA_SUCCESS;
+ return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
@@ -994,7 +1078,7 @@ static inline void dma_release_channel(struct dma_chan *chan)
int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index b6eb7a05d58e..f820f0a336c9 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -99,6 +99,7 @@ extern const char * dmi_get_system_info(int field);
extern const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from);
extern void dmi_scan_machine(void);
+extern void dmi_memdev_walk(void);
extern void dmi_set_dump_stack_arch_desc(void);
extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
extern int dmi_name_in_vendors(const char *str);
@@ -107,6 +108,7 @@ extern int dmi_available;
extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data);
extern bool dmi_match(enum dmi_field f, const char *str);
+extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
#else
@@ -115,6 +117,7 @@ static inline const char * dmi_get_system_info(int field) { return NULL; }
static inline const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from) { return NULL; }
static inline void dmi_scan_machine(void) { return; }
+static inline void dmi_memdev_walk(void) { }
static inline void dmi_set_dump_stack_arch_desc(void) { }
static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
@@ -133,6 +136,8 @@ static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data) { return -1; }
static inline bool dmi_match(enum dmi_field f, const char *str)
{ return false; }
+static inline void dmi_memdev_name(u16 handle, const char **bank,
+ const char **device) { }
static inline const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *list) { return NULL; }
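A sketch of how an error handler might resolve a DIMM label with the new helpers; smbios_handle is an illustrative variable holding the SMBIOS type-17 handle reported with the error, and dmi_memdev_walk() is assumed to have run during early boot:

    const char *bank = NULL, *device = NULL;

    dmi_memdev_name(smbios_handle, &bank, &device);
    if (bank && device)
            pr_info("DIMM location: %s %s\n", bank, device);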
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 5c6d7fbaf89e..dbdffe8d4469 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -51,7 +51,7 @@ static inline void opstate_init(void)
#define EDAC_MC_LABEL_LEN 31
/* Maximum size of the location string */
-#define LOCATION_SIZE 80
+#define LOCATION_SIZE 256
/* Defines the maximum number of labels that can be reported */
#define EDAC_MAX_LABELS 8
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 5f8f176154f7..11ce6784a196 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -39,6 +39,8 @@
typedef unsigned long efi_status_t;
typedef u8 efi_bool_t;
typedef u16 efi_char16_t; /* UNICODE character */
+typedef u64 efi_physical_addr_t;
+typedef void *efi_handle_t;
typedef struct {
@@ -96,6 +98,7 @@ typedef struct {
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
#define EFI_PAGE_SHIFT 12
+#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
typedef struct {
u32 type;
@@ -157,11 +160,13 @@ typedef struct {
efi_table_hdr_t hdr;
void *raise_tpl;
void *restore_tpl;
- void *allocate_pages;
- void *free_pages;
- void *get_memory_map;
- void *allocate_pool;
- void *free_pool;
+ efi_status_t (*allocate_pages)(int, int, unsigned long,
+ efi_physical_addr_t *);
+ efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long);
+ efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *,
+ unsigned long *, u32 *);
+ efi_status_t (*allocate_pool)(int, unsigned long, void **);
+ efi_status_t (*free_pool)(void *);
void *create_event;
void *set_timer;
void *wait_for_event;
@@ -171,7 +176,7 @@ typedef struct {
void *install_protocol_interface;
void *reinstall_protocol_interface;
void *uninstall_protocol_interface;
- void *handle_protocol;
+ efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
void *__reserved;
void *register_protocol_notify;
void *locate_handle;
@@ -181,7 +186,7 @@ typedef struct {
void *start_image;
void *exit;
void *unload_image;
- void *exit_boot_services;
+ efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long);
void *get_next_monotonic_count;
void *stall;
void *set_watchdog_timer;
@@ -404,6 +409,12 @@ typedef struct {
unsigned long table;
} efi_config_table_t;
+typedef struct {
+ efi_guid_t guid;
+ const char *name;
+ unsigned long *ptr;
+} efi_config_table_type_t;
+
#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
@@ -488,10 +499,6 @@ typedef struct {
unsigned long unload;
} efi_loaded_image_t;
-typedef struct {
- u64 revision;
- void *open_volume;
-} efi_file_io_interface_t;
typedef struct {
u64 size;
@@ -504,20 +511,30 @@ typedef struct {
efi_char16_t filename[1];
} efi_file_info_t;
-typedef struct {
+typedef struct _efi_file_handle {
u64 revision;
- void *open;
- void *close;
+ efi_status_t (*open)(struct _efi_file_handle *,
+ struct _efi_file_handle **,
+ efi_char16_t *, u64, u64);
+ efi_status_t (*close)(struct _efi_file_handle *);
void *delete;
- void *read;
+ efi_status_t (*read)(struct _efi_file_handle *, unsigned long *,
+ void *);
void *write;
void *get_position;
void *set_position;
- void *get_info;
+ efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *,
+ unsigned long *, void *);
void *set_info;
void *flush;
} efi_file_handle_t;
+typedef struct _efi_file_io_interface {
+ u64 revision;
+ int (*open_volume)(struct _efi_file_io_interface *,
+ efi_file_handle_t **);
+} efi_file_io_interface_t;
+
#define EFI_FILE_MODE_READ 0x0000000000000001
#define EFI_FILE_MODE_WRITE 0x0000000000000002
#define EFI_FILE_MODE_CREATE 0x8000000000000000
@@ -552,6 +569,7 @@ extern struct efi {
efi_get_next_high_mono_count_t *get_next_high_mono_count;
efi_reset_system_t *reset_system;
efi_set_virtual_address_map_t *set_virtual_address_map;
+ struct efi_memory_map *memmap;
} efi;
static inline int
@@ -587,6 +605,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
}
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+extern int efi_config_init(efi_config_table_type_t *arch_tables);
extern u64 efi_get_iobase (void);
extern u32 efi_mem_type (unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
@@ -782,6 +801,15 @@ struct efivar_entry {
struct efi_variable var;
struct list_head list;
struct kobject kobj;
+ bool scanning;
+ bool deleting;
+};
+
+
+struct efi_simple_text_output_protocol {
+ void *reset;
+ efi_status_t (*output_string)(void *, void *);
+ void *test_string;
};
extern struct list_head efivar_sysfs_list;
@@ -840,6 +868,8 @@ void efivar_run_worker(void);
#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
int efivars_sysfs_init(void);
+#define EFIVARS_DATA_SIZE_MAX 1024
+
#endif /* CONFIG_EFI_VARS */
#endif /* _LINUX_EFI_H */
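With the boot-services members now properly typed, stub code on architectures that can use the native calling convention may call them through the table directly; a sketch, assuming sys_table points at the firmware's efi_system_table_t and size is already known:

    void *buf;
    efi_status_t status;

    status = sys_table->boottime->allocate_pool(EFI_LOADER_DATA, size, &buf);
    if (status != EFI_SUCCESS)
            return status;
    /* ... use buf ... */
    sys_table->boottime->free_pool(buf);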
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 40a3c0e01b2b..67a5fa7830c4 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -39,13 +39,13 @@ extern Elf64_Dyn _DYNAMIC [];
/* Optional callbacks to write extra ELF notes. */
struct file;
+struct coredump_params;
#ifndef ARCH_HAVE_EXTRA_ELF_NOTES
static inline int elf_coredump_extra_notes_size(void) { return 0; }
-static inline int elf_coredump_extra_notes_write(struct file *file,
- loff_t *foffset) { return 0; }
+static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; }
#else
extern int elf_coredump_extra_notes_size(void);
-extern int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset);
+extern int elf_coredump_extra_notes_write(struct coredump_params *cprm);
#endif
#endif /* _LINUX_ELF_H */
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index cdd3d13efce7..698d51a0eea3 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -6,6 +6,8 @@
#include <asm/elf.h>
#include <uapi/linux/elfcore.h>
+struct coredump_params;
+
static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
{
#ifdef ELF_CORE_COPY_REGS
@@ -63,10 +65,9 @@ static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregse
*/
extern Elf_Half elf_core_extra_phdrs(void);
extern int
-elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
- unsigned long limit);
+elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
extern int
-elf_core_write_extra_data(struct file *file, size_t *size, unsigned long limit);
+elf_core_write_extra_data(struct coredump_params *cprm);
extern size_t elf_core_extra_data_size(void);
#endif /* _LINUX_ELFCORE_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index d8b512496e50..fc4a9aa7dd82 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -28,27 +28,24 @@
#include <asm/unaligned.h>
#ifdef __KERNEL__
-extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
+__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
-extern int eth_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type,
- const void *daddr, const void *saddr, unsigned len);
-extern int eth_rebuild_header(struct sk_buff *skb);
-extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
-extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
-extern void eth_header_cache_update(struct hh_cache *hh,
- const struct net_device *dev,
- const unsigned char *haddr);
-extern int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
-extern void eth_commit_mac_addr_change(struct net_device *dev, void *p);
-extern int eth_mac_addr(struct net_device *dev, void *p);
-extern int eth_change_mtu(struct net_device *dev, int new_mtu);
-extern int eth_validate_addr(struct net_device *dev);
-
-
-
-extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
+ const void *daddr, const void *saddr, unsigned len);
+int eth_rebuild_header(struct sk_buff *skb);
+int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
+ __be16 type);
+void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
+ const unsigned char *haddr);
+int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
+void eth_commit_mac_addr_change(struct net_device *dev, void *p);
+int eth_mac_addr(struct net_device *dev, void *p);
+int eth_change_mtu(struct net_device *dev, int new_mtu);
+int eth_validate_addr(struct net_device *dev);
+
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
unsigned int rxqs);
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index cf5d2af61b81..ff0b981f078e 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -9,7 +9,6 @@
#define _LINUX_EVENTFD_H
#include <linux/fcntl.h>
-#include <linux/file.h>
#include <linux/wait.h>
/*
@@ -26,6 +25,8 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+struct file;
+
#ifdef CONFIG_EVENTFD
struct file *eventfd_file_create(unsigned int count, int flags);
diff --git a/include/linux/export.h b/include/linux/export.h
index 412cd509effe..3f2793d51899 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -43,7 +43,7 @@ extern struct module __this_module;
/* Mark the CRC weak since genksyms apparently decides not to
* generate a checksums for some symbols */
#define __CRC_SYMBOL(sym, sec) \
- extern void *__crc_##sym __attribute__((weak)); \
+ extern __visible void *__crc_##sym __attribute__((weak)); \
static const unsigned long __kcrctab_##sym \
__used \
__attribute__((section("___kcrctab" sec "+" #sym), unused)) \
@@ -59,7 +59,7 @@ extern struct module __this_module;
static const char __kstrtab_##sym[] \
__attribute__((section("__ksymtab_strings"), aligned(1))) \
= VMLINUX_SYMBOL_STR(sym); \
- static const struct kernel_symbol __ksymtab_##sym \
+ __visible const struct kernel_symbol __ksymtab_##sym \
__used \
__attribute__((section("___ksymtab" sec "+" #sym), unused)) \
= { (unsigned long)&sym, __kstrtab_##sym }
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index fcb51c88319f..21c59af1150b 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -51,10 +51,10 @@
enum extcon_cable_name {
EXTCON_USB = 0,
EXTCON_USB_HOST,
- EXTCON_TA, /* Travel Adaptor */
+ EXTCON_TA, /* Travel Adaptor */
EXTCON_FAST_CHARGER,
EXTCON_SLOW_CHARGER,
- EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */
+ EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */
EXTCON_HDMI,
EXTCON_MHL,
EXTCON_DVI,
@@ -76,8 +76,8 @@ struct extcon_cable;
/**
* struct extcon_dev - An extcon device represents one external connector.
- * @name: The name of this extcon device. Parent device name is used
- * if NULL.
+ * @name: The name of this extcon device. Parent device name is
+ * used if NULL.
* @supported_cable: Array of supported cable names ending with NULL.
* If supported_cable is NULL, cable name related APIs
* are disabled.
@@ -89,21 +89,21 @@ struct extcon_cable;
 * be attached simultaneously. {0x7, 0} is equivalent to
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
* can be no simultaneous connections.
- * @print_name: An optional callback to override the method to print the
- * name of the extcon device.
+ * @print_name: An optional callback to override the method to print the
+ * name of the extcon device.
* @print_state: An optional callback to override the method to print the
- * status of the extcon device.
- * @dev: Device of this extcon. Do not provide at register-time.
- * @state: Attach/detach state of this extcon. Do not provide at
- * register-time
- * @nh: Notifier for the state change events from this extcon
- * @entry: To support list of extcon devices so that users can search
- * for extcon devices based on the extcon name.
+ * status of the extcon device.
+ * @dev: Device of this extcon.
+ * @state: Attach/detach state of this extcon. Do not provide at
+ * register-time.
+ * @nh: Notifier for the state change events from this extcon
+ * @entry: To support list of extcon devices so that users can search
+ * for extcon devices based on the extcon name.
* @lock:
* @max_supported: Internal value to store the number of cables.
* @extcon_dev_type: Device_type struct to provide attribute_groups
* customized for each extcon device.
- * @cables: Sysfs subdirectories. Each represents one cable.
+ * @cables: Sysfs subdirectories. Each represents one cable.
*
* In most cases, users only need to provide "User initializing data" of
* this struct when registering an extcon. In some exceptional cases,
@@ -111,26 +111,27 @@ struct extcon_cable;
* are overwritten by register function.
*/
struct extcon_dev {
- /* --- Optional user initializing data --- */
- const char *name;
+ /* Optional user initializing data */
+ const char *name;
const char **supported_cable;
- const u32 *mutually_exclusive;
+ const u32 *mutually_exclusive;
- /* --- Optional callbacks to override class functions --- */
+ /* Optional callbacks to override class functions */
ssize_t (*print_name)(struct extcon_dev *edev, char *buf);
ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
- /* --- Internal data. Please do not set. --- */
- struct device *dev;
- u32 state;
+ /* Internal data. Please do not set. */
+ struct device dev;
struct raw_notifier_head nh;
struct list_head entry;
- spinlock_t lock; /* could be called by irq handler */
int max_supported;
+ spinlock_t lock; /* could be called by irq handler */
+ u32 state;
/* /sys/class/extcon/.../cable.n/... */
struct device_type extcon_dev_type;
struct extcon_cable *cables;
+
/* /sys/class/extcon/.../mutually_exclusive/... */
struct attribute_group attr_g_muex;
struct attribute **attrs_muex;
@@ -138,13 +139,13 @@ struct extcon_dev {
};
/**
- * struct extcon_cable - An internal data for each cable of extcon device.
- * @edev: The extcon device
+ * struct extcon_cable - An internal data for each cable of extcon device.
+ * @edev: The extcon device
* @cable_index: Index of this cable in the edev
- * @attr_g: Attribute group for the cable
- * @attr_name: "name" sysfs entry
- * @attr_state: "state" sysfs entry
- * @attrs: Array pointing to attr_name and attr_state for attr_g
+ * @attr_g: Attribute group for the cable
+ * @attr_name: "name" sysfs entry
+ * @attr_state: "state" sysfs entry
+ * @attrs: Array pointing to attr_name and attr_state for attr_g
*/
struct extcon_cable {
struct extcon_dev *edev;
@@ -159,11 +160,13 @@ struct extcon_cable {
/**
* struct extcon_specific_cable_nb - An internal data for
- * extcon_register_interest().
- * @internal_nb: a notifier block bridging extcon notifier and cable notifier.
- * @user_nb: user provided notifier block for events from a specific cable.
+ * extcon_register_interest().
+ * @internal_nb: A notifier block bridging extcon notifier
+ * and cable notifier.
+ * @user_nb: user provided notifier block for events from
+ * a specific cable.
* @cable_index: the target cable.
- * @edev: the target extcon device.
+ * @edev: the target extcon device.
* @previous_value: the saved previous event value.
*/
struct extcon_specific_cable_nb {
@@ -180,7 +183,7 @@ struct extcon_specific_cable_nb {
* Following APIs are for notifiers or configurations.
* Notifiers are the external port and connection devices.
*/
-extern int extcon_dev_register(struct extcon_dev *edev, struct device *dev);
+extern int extcon_dev_register(struct extcon_dev *edev);
extern void extcon_dev_unregister(struct extcon_dev *edev);
extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
@@ -238,8 +241,7 @@ extern int extcon_register_notifier(struct extcon_dev *edev,
extern int extcon_unregister_notifier(struct extcon_dev *edev,
struct notifier_block *nb);
#else /* CONFIG_EXTCON */
-static inline int extcon_dev_register(struct extcon_dev *edev,
- struct device *dev)
+static inline int extcon_dev_register(struct extcon_dev *edev)
{
return 0;
}
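A sketch of how a driver adapts to the single-argument extcon_dev_register() and the now-embedded struct device; the cable list, names and the driver-private data structure are illustrative:

    static const char *board_cables[] = { "USB", "USB-Host", NULL };

    data->edev.name = "board-usb-port";
    data->edev.supported_cable = board_cables;
    data->edev.dev.parent = &pdev->dev;   /* parent set via the embedded dev */
    ret = extcon_dev_register(&data->edev);
    if (ret < 0)
            return ret;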
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
index 20e9eef25d4c..9ca958c4e94c 100644
--- a/include/linux/extcon/extcon-adc-jack.h
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -20,10 +20,10 @@
/**
* struct adc_jack_cond - condition to use an extcon state
- * @state - the corresponding extcon state (if 0, this struct denotes
- * the last adc_jack_cond element among the array)
- * @min_adc - min adc value for this condition
- * @max_adc - max adc value for this condition
+ * @state: the corresponding extcon state (if 0, this struct
+ * denotes the last adc_jack_cond element among the array)
+ * @min_adc: min adc value for this condition
+ * @max_adc: max adc value for this condition
*
* For example, if { .state = 0x3, .min_adc = 100, .max_adc = 200}, it means
 * that if the ADC value is between (inclusive) 100 and 200, then the cable 0 and
@@ -33,34 +33,34 @@
* because when no adc_jack_cond is met, state = 0 is automatically chosen.
*/
struct adc_jack_cond {
- u32 state; /* extcon state value. 0 if invalid */
+ u32 state; /* extcon state value. 0 if invalid */
u32 min_adc;
u32 max_adc;
};
/**
* struct adc_jack_pdata - platform data for adc jack device.
- * @name - name of the extcon device. If null, "adc-jack" is used.
- * @consumer_channel - Unique name to identify the channel on the consumer
- * side. This typically describes the channels used within
- * the consumer. E.g. 'battery_voltage'
- * @cable_names - array of cable names ending with null.
- * @adc_contitions - array of struct adc_jack_cond conditions ending
- * with .state = 0 entry. This describes how to decode
- * adc values into extcon state.
- * @irq_flags - irq flags used for the @irq
- * @handling_delay_ms - in some devices, we need to read ADC value some
- * milli-seconds after the interrupt occurs. You may
- * describe such delays with @handling_delay_ms, which
- * is rounded-off by jiffies.
+ * @name: name of the extcon device. If null, "adc-jack" is used.
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channels used within
+ * the consumer. E.g. 'battery_voltage'
+ * @cable_names: array of cable names ending with null.
+ * @adc_conditions:	array of struct adc_jack_cond conditions ending
+ * with .state = 0 entry. This describes how to decode
+ * adc values into extcon state.
+ * @irq_flags: irq flags used for the @irq
+ * @handling_delay_ms:	in some devices, the ADC value needs to be read a few
+ *			milliseconds after the interrupt occurs. You may
+ *			describe such delays with @handling_delay_ms, which
+ *			is rounded off to jiffies.
*/
struct adc_jack_pdata {
const char *name;
const char *consumer_channel;
- /*
- * The last entry should be NULL
- */
+
+ /* The last entry should be NULL */
const char **cable_names;
+
/* The last entry's state should be 0 */
struct adc_jack_cond *adc_conditions;
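A hypothetical board-file sketch of the platform data described above, assuming a jack whose ADC reports 100..200 counts for a headset and 201..400 for a charger:

    static struct adc_jack_cond board_jack_conditions[] = {
            { .state = BIT(0), .min_adc = 100, .max_adc = 200 },
            { .state = BIT(1), .min_adc = 201, .max_adc = 400 },
            { },                            /* sentinel: .state = 0 */
    };

    static const char *board_jack_cables[] = { "Headset", "Charger", NULL };

    static struct adc_jack_pdata board_jack_pdata = {
            .name              = "board-jack",
            .consumer_channel  = "jack_adc",
            .cable_names       = board_jack_cables,
            .adc_conditions    = board_jack_conditions,
            .handling_delay_ms = 10,
    };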
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
index 2d8307f7d67d..4195810f87fe 100644
--- a/include/linux/extcon/extcon-gpio.h
+++ b/include/linux/extcon/extcon-gpio.h
@@ -25,14 +25,17 @@
/**
* struct gpio_extcon_platform_data - A simple GPIO-controlled extcon device.
- * @name The name of this GPIO extcon device.
- * @gpio Corresponding GPIO.
- * @debounce Debounce time for GPIO IRQ in ms.
- * @irq_flags IRQ Flags (e.g., IRQF_TRIGGER_LOW).
- * @state_on print_state is overriden with state_on if attached. If Null,
- * default method of extcon class is used.
- * @state_off print_state is overriden with state_on if detached. If Null,
- * default method of extcon class is used.
+ * @name: The name of this GPIO extcon device.
+ * @gpio: Corresponding GPIO.
+ * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0
+ * If true, low state of gpio means active.
+ * If false, high state of gpio means active.
+ * @debounce: Debounce time for GPIO IRQ in ms.
+ * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
+ * @state_on:	print_state is overridden with state_on if attached.
+ *		If NULL, the default method of extcon class is used.
+ * @state_off:	print_state is overridden with state_off if detached.
+ *		If NULL, the default method of extcon class is used.
*
* Note that in order for state_on or state_off to be valid, both state_on
* and state_off should be not NULL. If at least one of them is NULL,
@@ -41,6 +44,7 @@
struct gpio_extcon_platform_data {
const char *name;
unsigned gpio;
+ bool gpio_active_low;
unsigned long debounce;
unsigned long irq_flags;
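A sketch of platform data using the new gpio_active_low flag; the GPIO number, name and timings are illustrative:

    static struct gpio_extcon_platform_data board_dock_pdata = {
            .name            = "dock",
            .gpio            = 42,
            .gpio_active_low = true,   /* the dock switch pulls the line low */
            .debounce        = 20,     /* ms */
            .irq_flags       = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
    };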
diff --git a/include/linux/fb.h b/include/linux/fb.h
index ffac70aab3e9..70c4836e4a9f 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -792,4 +792,16 @@ extern int fb_find_mode(struct fb_var_screeninfo *var,
const struct fb_videomode *default_mode,
unsigned int default_bpp);
+/* Convenience logging macros */
+#define fb_err(fb_info, fmt, ...) \
+ pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_notice(info, fmt, ...) \
+	pr_notice("fb%d: " fmt, (info)->node, ##__VA_ARGS__)
+#define fb_warn(fb_info, fmt, ...) \
+ pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_info(fb_info, fmt, ...) \
+ pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_dbg(fb_info, fmt, ...) \
+ pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+
#endif /* _LINUX_FB_H */
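A sketch of driver use of the new convenience macros, assuming info is the driver's struct fb_info and var its fb_var_screeninfo:

    if (ret) {
            fb_err(info, "failed to set %ux%u video mode: %d\n",
                   var->xres, var->yres, ret);
            return ret;
    }
    fb_dbg(info, "mode set, pitch %u bytes\n", info->fix.line_length);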
diff --git a/include/linux/fcdevice.h b/include/linux/fcdevice.h
index e460ef831984..5009fa16b5d8 100644
--- a/include/linux/fcdevice.h
+++ b/include/linux/fcdevice.h
@@ -27,7 +27,7 @@
#include <linux/if_fc.h>
#ifdef __KERNEL__
-extern struct net_device *alloc_fcdev(int sizeof_priv);
+struct net_device *alloc_fcdev(int sizeof_priv);
#endif
#endif /* _LINUX_FCDEVICE_H */
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h
index 155bafd9e886..9a79f0106da1 100644
--- a/include/linux/fddidevice.h
+++ b/include/linux/fddidevice.h
@@ -25,10 +25,9 @@
#include <linux/if_fddi.h>
#ifdef __KERNEL__
-extern __be16 fddi_type_trans(struct sk_buff *skb,
- struct net_device *dev);
-extern int fddi_change_mtu(struct net_device *dev, int new_mtu);
-extern struct net_device *alloc_fddidev(int sizeof_priv);
+__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int fddi_change_mtu(struct net_device *dev, int new_mtu);
+struct net_device *alloc_fddidev(int sizeof_priv);
#endif
#endif /* _LINUX_FDDIDEVICE_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a6ac84871d6d..ff4e40cd45b1 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -6,6 +6,7 @@
#include <linux/atomic.h>
#include <linux/compat.h>
+#include <linux/workqueue.h>
#include <uapi/linux/filter.h>
#ifdef CONFIG_COMPAT
@@ -25,15 +26,19 @@ struct sk_filter
{
atomic_t refcnt;
unsigned int len; /* Number of filter blocks */
+ struct rcu_head rcu;
unsigned int (*bpf_func)(const struct sk_buff *skb,
const struct sock_filter *filter);
- struct rcu_head rcu;
- struct sock_filter insns[0];
+ union {
+ struct sock_filter insns[0];
+ struct work_struct work;
+ };
};
-static inline unsigned int sk_filter_len(const struct sk_filter *fp)
+static inline unsigned int sk_filter_size(unsigned int proglen)
{
- return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
+ return max(sizeof(struct sk_filter),
+ offsetof(struct sk_filter, insns[proglen]));
}
extern int sk_filter(struct sock *sk, struct sk_buff *skb);
@@ -67,11 +72,13 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
}
#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
#else
+#include <linux/slab.h>
static inline void bpf_jit_compile(struct sk_filter *fp)
{
}
static inline void bpf_jit_free(struct sk_filter *fp)
{
+ kfree(fp);
}
#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
#endif
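A sketch of how a filter is now sized and allocated with the insns/work union in place, modeled loosely on sk_unattached_filter_create() with error paths trimmed; fprog is assumed to be a kernel-resident struct sock_fprog:

    unsigned int fsize = fprog->len * sizeof(struct sock_filter);
    struct sk_filter *fp;

    /* sk_filter_size() guarantees room for the work_struct when len is small */
    fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
    if (!fp)
            return -ENOMEM;
    memcpy(fp->insns, fprog->filter, fsize);
    atomic_set(&fp->refcnt, 1);
    fp->len = fprog->len;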
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3b4cd8296e41..121f11f001c0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -10,6 +10,7 @@
#include <linux/stat.h>
#include <linux/cache.h>
#include <linux/list.h>
+#include <linux/list_lru.h>
#include <linux/llist.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
@@ -622,10 +623,13 @@ static inline int inode_unhashed(struct inode *inode)
* 0: the object of the current VFS operation
* 1: parent
* 2: child/target
- * 3: quota file
+ * 3: xattr
+ * 4: second non-directory
+ * The last is for certain operations (such as rename) which lock two
+ * non-directories at once.
*
* The locking order between these classes is
- * parent -> child -> normal -> xattr -> quota
+ * parent -> child -> normal -> xattr -> second non-directory
*/
enum inode_i_mutex_lock_class
{
@@ -633,9 +637,12 @@ enum inode_i_mutex_lock_class
I_MUTEX_PARENT,
I_MUTEX_CHILD,
I_MUTEX_XATTR,
- I_MUTEX_QUOTA
+ I_MUTEX_NONDIR2
};
+void lock_two_nondirectories(struct inode *, struct inode*);
+void unlock_two_nondirectories(struct inode *, struct inode*);
+
/*
* NOTE: in a 32bit arch with a preemptable kernel and
* an UP compile the i_size_read/write must be atomic
@@ -763,12 +770,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
#define FILE_MNT_WRITE_RELEASED 2
struct file {
- /*
- * fu_list becomes invalid after file_free is called and queued via
- * fu_rcuhead for RCU freeing
- */
union {
- struct list_head fu_list;
struct llist_node fu_llist;
struct rcu_head fu_rcuhead;
} f_u;
@@ -782,9 +784,6 @@ struct file {
* Must not be taken from IRQ context.
*/
spinlock_t f_lock;
-#ifdef CONFIG_SMP
- int f_sb_list_cpu;
-#endif
atomic_long_t f_count;
unsigned int f_flags;
fmode_t f_mode;
@@ -881,6 +880,7 @@ static inline int file_check_writeable(struct file *filp)
#define FL_POSIX 1
#define FL_FLOCK 2
+#define FL_DELEG 4 /* NFSv4 delegation */
#define FL_ACCESS 8 /* not trying to lock, just looking */
#define FL_EXISTS 16 /* when unlocking, test for existence */
#define FL_LEASE 32 /* lease held on this file */
@@ -1022,7 +1022,7 @@ extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
-extern int __break_lease(struct inode *inode, unsigned int flags);
+extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **);
extern int vfs_setlease(struct file *, long, struct file_lock **);
@@ -1131,7 +1131,7 @@ static inline int flock_lock_file_wait(struct file *filp,
return -ENOLCK;
}
-static inline int __break_lease(struct inode *inode, unsigned int mode)
+static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
return 0;
}
@@ -1263,21 +1263,7 @@ struct super_block {
struct list_head s_inodes; /* all inodes */
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
-#ifdef CONFIG_SMP
- struct list_head __percpu *s_files;
-#else
- struct list_head s_files;
-#endif
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
- /* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
- struct list_head s_dentry_lru; /* unused dentry lru */
- int s_nr_dentry_unused; /* # of dentry on lru */
-
- /* s_inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */
- spinlock_t s_inode_lru_lock ____cacheline_aligned_in_smp;
- struct list_head s_inode_lru; /* unused inode lru */
- int s_nr_inodes_unused; /* # of inodes on lru */
-
struct block_device *s_bdev;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
@@ -1331,11 +1317,15 @@ struct super_block {
/* AIO completions deferred from interrupt context */
struct workqueue_struct *s_dio_done_wq;
-};
-/* superblock cache pruning functions */
-extern void prune_icache_sb(struct super_block *sb, int nr_to_scan);
-extern void prune_dcache_sb(struct super_block *sb, int nr_to_scan);
+ /*
+ * Keep the lru lists last in the structure so they always sit on their
+ * own individual cachelines.
+ */
+ struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
+ struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
+ struct rcu_head rcu;
+};
extern struct timespec current_fs_time(struct super_block *sb);
@@ -1463,10 +1453,10 @@ extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
extern int vfs_symlink(struct inode *, struct dentry *, const char *);
-extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
+extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
extern int vfs_rmdir(struct inode *, struct dentry *);
-extern int vfs_unlink(struct inode *, struct dentry *);
-extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
+extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **);
/*
* VFS dentry helper functions.
@@ -1629,8 +1619,8 @@ struct super_operations {
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
#endif
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
- int (*nr_cached_objects)(struct super_block *);
- void (*free_cached_objects)(struct super_block *, int);
+ long (*nr_cached_objects)(struct super_block *, int);
+ long (*free_cached_objects)(struct super_block *, long, int);
};
/*
@@ -1880,6 +1870,17 @@ extern struct dentry *mount_pseudo(struct file_system_type *, char *,
(((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
#define fops_put(fops) \
do { if (fops) module_put((fops)->owner); } while(0)
+/*
+ * This one is to be used *ONLY* from ->open() instances.
+ * fops must be non-NULL, pinned down *and* module dependencies
+ * should be sufficient to pin the caller down as well.
+ */
+#define replace_fops(f, fops) \
+ do { \
+ struct file *__file = (f); \
+ fops_put(__file->f_op); \
+ BUG_ON(!(__file->f_op = (fops))); \
+ } while(0)
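A sketch of an ->open() instance handing the file over to device-specific operations with the new macro; my_real_fops is a hypothetical module-owned, already-pinned file_operations:

    static int my_open(struct inode *inode, struct file *filp)
    {
            replace_fops(filp, &my_real_fops);
            if (filp->f_op->open)
                    return filp->f_op->open(inode, filp);
            return 0;
    }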
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
@@ -1900,9 +1901,13 @@ extern int vfs_ustat(dev_t, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern bool our_mnt(struct vfsmount *mnt);
+extern bool fs_fully_visible(struct file_system_type *);
extern int current_umask(void);
+extern void ihold(struct inode * inode);
+extern void iput(struct inode *);
+
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -1959,9 +1964,39 @@ static inline int locks_verify_truncate(struct inode *inode,
static inline int break_lease(struct inode *inode, unsigned int mode)
{
if (inode->i_flock)
- return __break_lease(inode, mode);
+ return __break_lease(inode, mode, FL_LEASE);
return 0;
}
+
+static inline int break_deleg(struct inode *inode, unsigned int mode)
+{
+ if (inode->i_flock)
+ return __break_lease(inode, mode, FL_DELEG);
+ return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+{
+ int ret;
+
+ ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
+ if (ret == -EWOULDBLOCK && delegated_inode) {
+ *delegated_inode = inode;
+ ihold(inode);
+ }
+ return ret;
+}
+
+static inline int break_deleg_wait(struct inode **delegated_inode)
+{
+ int ret;
+
+ ret = break_deleg(*delegated_inode, O_WRONLY);
+ iput(*delegated_inode);
+ *delegated_inode = NULL;
+ return ret;
+}
+
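A condensed sketch of the retry pattern callers such as do_unlinkat() use with the new delegated_inode argument: if vfs_unlink() finds an outstanding delegation it returns with *delegated_inode set, and the caller waits for the lease break before retrying. The wrapper name and the simplified locking are illustrative:

    static int unlink_one(struct inode *dir, struct dentry *dentry)
    {
            struct inode *delegated_inode = NULL;
            int error;
    retry:
            mutex_lock(&dir->i_mutex);
            error = vfs_unlink(dir, dentry, &delegated_inode);
            mutex_unlock(&dir->i_mutex);
            if (delegated_inode) {
                    error = break_deleg_wait(&delegated_inode);
                    if (!error)
                            goto retry;
            }
            return error;
    }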
#else /* !CONFIG_FILE_LOCKING */
static inline int locks_mandatory_locked(struct inode *inode)
{
@@ -2001,6 +2036,22 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
return 0;
}
+static inline int break_deleg(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+{
+ return 0;
+}
+
+static inline int break_deleg_wait(struct inode **delegated_inode)
+{
+ BUG();
+ return 0;
+}
+
#endif /* CONFIG_FILE_LOCKING */
/* fs/open.c */
@@ -2073,6 +2124,7 @@ extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
+extern int sb_is_blkdev_sb(struct super_block *sb);
#else
static inline void bd_forget(struct inode *inode) {}
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
@@ -2092,6 +2144,11 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
{
}
+
+static inline int sb_is_blkdev_sb(struct super_block *sb)
+{
+ return 0;
+}
#endif
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
@@ -2221,7 +2278,7 @@ extern void emergency_remount(void);
#ifdef CONFIG_BLOCK
extern sector_t bmap(struct inode *, sector_t);
#endif
-extern int notify_change(struct dentry *, struct iattr *);
+extern int notify_change(struct dentry *, struct iattr *, struct inode **);
extern int inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int);
@@ -2290,6 +2347,11 @@ static inline void allow_write_access(struct file *file)
if (file)
atomic_inc(&file_inode(file)->i_writecount);
}
+static inline bool inode_is_open_for_write(const struct inode *inode)
+{
+ return atomic_read(&inode->i_writecount) > 0;
+}
+
#ifdef CONFIG_IMA
static inline void i_readcount_dec(struct inode *inode)
{
@@ -2330,8 +2392,6 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
extern void address_space_init_once(struct address_space *mapping);
-extern void ihold(struct inode * inode);
-extern void iput(struct inode *);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
@@ -2493,7 +2553,6 @@ extern const struct file_operations generic_ro_fops;
#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
-extern int vfs_follow_link(struct nameidata *, const char *);
extern int page_readlink(struct dentry *, char __user *, int);
extern void *page_follow_link_light(struct dentry *, struct nameidata *);
extern void page_put_link(struct dentry *, struct nameidata *, void *);
@@ -2501,8 +2560,10 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len,
int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
+extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
extern int generic_readlink(struct dentry *, char __user *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
+int vfs_getattr_nosec(struct path *path, struct kstat *stat);
extern int vfs_getattr(struct path *, struct kstat *);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -2561,6 +2622,9 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
extern int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
+extern int always_delete_dentry(const struct dentry *);
+extern struct inode *alloc_anon_inode(struct super_block *);
+extern const struct dentry_operations simple_dentry_operations;
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 2b93a9a5a1e6..0efc3e62843a 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -39,17 +39,6 @@ static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
spin_unlock(&fs->lock);
}
-static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
- struct path *pwd)
-{
- spin_lock(&fs->lock);
- *root = fs->root;
- path_get(root);
- *pwd = fs->pwd;
- path_get(pwd);
- spin_unlock(&fs->lock);
-}
-
extern bool current_chrooted(void);
#endif /* _LINUX_FS_STRUCT_H */
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index a9ff9a36b86d..771484993ca7 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -251,6 +251,10 @@ struct fscache_cache_ops {
/* unpin an object in the cache */
void (*unpin_object)(struct fscache_object *object);
+ /* check the consistency between the backing cache and the FS-Cache
+ * cookie */
+ bool (*check_consistency)(struct fscache_operation *op);
+
/* store the updated auxiliary data on an object */
void (*update_object)(struct fscache_object *object);
@@ -304,36 +308,6 @@ struct fscache_cache_ops {
void (*dissociate_pages)(struct fscache_cache *cache);
};
-/*
- * data file or index object cookie
- * - a file will only appear in one cache
- * - a request to cache a file may or may not be honoured, subject to
- * constraints such as disk space
- * - indices are created on disk just-in-time
- */
-struct fscache_cookie {
- atomic_t usage; /* number of users of this cookie */
- atomic_t n_children; /* number of children of this cookie */
- atomic_t n_active; /* number of active users of netfs ptrs */
- spinlock_t lock;
- spinlock_t stores_lock; /* lock on page store tree */
- struct hlist_head backing_objects; /* object(s) backing this file/index */
- const struct fscache_cookie_def *def; /* definition */
- struct fscache_cookie *parent; /* parent of this entry */
- void *netfs_data; /* back pointer to netfs */
- struct radix_tree_root stores; /* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
-
- unsigned long flags;
-#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
-#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
-#define FSCACHE_COOKIE_RETIRED 5 /* T if cookie was retired */
-};
-
extern struct fscache_cookie fscache_fsdef_index;
/*
@@ -396,6 +370,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */
#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
+#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */
@@ -507,6 +482,11 @@ static inline void fscache_end_io(struct fscache_retrieval *op,
op->end_io_func(page, op->context, error);
}
+static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
+{
+ atomic_inc(&cookie->n_active);
+}
+
/**
* fscache_use_cookie - Request usage of cookie attached to an object
* @object: Object description
@@ -520,6 +500,16 @@ static inline bool fscache_use_cookie(struct fscache_object *object)
return atomic_inc_not_zero(&cookie->n_active) != 0;
}
+static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
+{
+ return atomic_dec_and_test(&cookie->n_active);
+}
+
+static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
+{
+ wake_up_atomic_t(&cookie->n_active);
+}
+
/**
* fscache_unuse_cookie - Cease usage of cookie attached to an object
* @object: Object description
@@ -530,8 +520,8 @@ static inline bool fscache_use_cookie(struct fscache_object *object)
static inline void fscache_unuse_cookie(struct fscache_object *object)
{
struct fscache_cookie *cookie = object->cookie;
- if (atomic_dec_and_test(&cookie->n_active))
- wake_up_atomic_t(&cookie->n_active);
+ if (__fscache_unuse_cookie(cookie))
+ __fscache_wake_unused_cookie(cookie);
}
/*
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 7a086235da4b..115bb81912cc 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -167,6 +167,42 @@ struct fscache_netfs {
};
/*
+ * data file or index object cookie
+ * - a file will only appear in one cache
+ * - a request to cache a file may or may not be honoured, subject to
+ * constraints such as disk space
+ * - indices are created on disk just-in-time
+ */
+struct fscache_cookie {
+ atomic_t usage; /* number of users of this cookie */
+ atomic_t n_children; /* number of children of this cookie */
+ atomic_t n_active; /* number of active users of netfs ptrs */
+ spinlock_t lock;
+ spinlock_t stores_lock; /* lock on page store tree */
+ struct hlist_head backing_objects; /* object(s) backing this file/index */
+ const struct fscache_cookie_def *def; /* definition */
+ struct fscache_cookie *parent; /* parent of this entry */
+ void *netfs_data; /* back pointer to netfs */
+ struct radix_tree_root stores; /* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
+
+ unsigned long flags;
+#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
+#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
+#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
+#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
+#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
+#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
+};
+
+static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
+{
+ return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+}
+
+/*
* slow-path functions for when there is actually caching available, and the
* netfs does actually have a valid token
* - these are not to be called directly
@@ -181,8 +217,9 @@ extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
extern struct fscache_cookie *__fscache_acquire_cookie(
struct fscache_cookie *,
const struct fscache_cookie_def *,
- void *);
-extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
+ void *, bool);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
+extern int __fscache_check_consistency(struct fscache_cookie *);
extern void __fscache_update_cookie(struct fscache_cookie *);
extern int __fscache_attr_changed(struct fscache_cookie *);
extern void __fscache_invalidate(struct fscache_cookie *);
@@ -208,6 +245,11 @@ extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
gfp_t);
extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
struct inode *);
+extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
+ struct list_head *pages);
+extern void __fscache_disable_cookie(struct fscache_cookie *, bool);
+extern void __fscache_enable_cookie(struct fscache_cookie *,
+ bool (*)(void *), void *);
/**
* fscache_register_netfs - Register a filesystem as desiring caching services
@@ -286,6 +328,7 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag)
* @def: A description of the cache object, including callback operations
* @netfs_data: An arbitrary piece of data to be kept in the cookie to
* represent the cache object to the netfs
+ * @enable: Whether or not to enable a data cookie immediately
*
* This function is used to inform FS-Cache about part of an index hierarchy
* that can be used to locate files. This is done by requesting a cookie for
@@ -298,10 +341,12 @@ static inline
struct fscache_cookie *fscache_acquire_cookie(
struct fscache_cookie *parent,
const struct fscache_cookie_def *def,
- void *netfs_data)
+ void *netfs_data,
+ bool enable)
{
- if (fscache_cookie_valid(parent))
- return __fscache_acquire_cookie(parent, def, netfs_data);
+ if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
+ return __fscache_acquire_cookie(parent, def, netfs_data,
+ enable);
else
return NULL;
}
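A sketch of a netfs acquiring an enabled data cookie under the new bool @enable argument; parent, my_inode_cookie_def and netfs_inode are illustrative:

    cookie = fscache_acquire_cookie(parent, &my_inode_cookie_def,
                                    netfs_inode, true);
    if (cookie && fscache_check_consistency(cookie) == -ESTALE)
            fscache_invalidate(cookie);
    /* ... use the cookie ... */
    fscache_relinquish_cookie(cookie, false);   /* keep the object in cache */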
@@ -319,13 +364,32 @@ struct fscache_cookie *fscache_acquire_cookie(
* description.
*/
static inline
-void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
if (fscache_cookie_valid(cookie))
__fscache_relinquish_cookie(cookie, retire);
}
/**
+ * fscache_check_consistency - Request a consistency check of a cache object
+ * @cookie: The cookie representing the cache object
+ *
+ * Request a consistency check from fscache, which passes the request
+ * to the backing cache.
+ *
+ * Returns 0 if consistent and -ESTALE if inconsistent. May also
+ * return -ENOMEM and -ERESTARTSYS.
+ */
+static inline
+int fscache_check_consistency(struct fscache_cookie *cookie)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ return __fscache_check_consistency(cookie);
+ else
+ return 0;
+}
+
+/**
* fscache_update_cookie - Request that a cache object be updated
* @cookie: The cookie representing the cache object
*
@@ -338,7 +402,7 @@ void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
static inline
void fscache_update_cookie(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
__fscache_update_cookie(cookie);
}
@@ -385,7 +449,7 @@ void fscache_unpin_cookie(struct fscache_cookie *cookie)
static inline
int fscache_attr_changed(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
return __fscache_attr_changed(cookie);
else
return -ENOBUFS;
@@ -407,7 +471,7 @@ int fscache_attr_changed(struct fscache_cookie *cookie)
static inline
void fscache_invalidate(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
__fscache_invalidate(cookie);
}
@@ -481,7 +545,7 @@ int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
void *context,
gfp_t gfp)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
return __fscache_read_or_alloc_page(cookie, page, end_io_func,
context, gfp);
else
@@ -532,7 +596,7 @@ int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
void *context,
gfp_t gfp)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
return __fscache_read_or_alloc_pages(cookie, mapping, pages,
nr_pages, end_io_func,
context, gfp);
@@ -563,13 +627,33 @@ int fscache_alloc_page(struct fscache_cookie *cookie,
struct page *page,
gfp_t gfp)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
return __fscache_alloc_page(cookie, page, gfp);
else
return -ENOBUFS;
}
/**
+ * fscache_readpages_cancel - Cancel read/alloc on pages
+ * @cookie: The cookie representing the inode's cache object.
+ * @pages: The netfs pages that we canceled write on in readpages()
+ *
+ * Uncache/unreserve the pages reserved earlier in readpages() via
+ * fscache_read_or_alloc_pages() and similar. In the common case, where
+ * caching succeeds in readpages(), this doesn't do anything. When the
+ * underlying netfs's readahead fails, we need to clean up the pagelist
+ * (unmark and uncache).
+ *
+ * This function may sleep as it may have to clean up disk state.
+ */
+static inline
+void fscache_readpages_cancel(struct fscache_cookie *cookie,
+ struct list_head *pages)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_readpages_cancel(cookie, pages);
+}
+
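A hedged sketch of the intended call site, the error path of a netfs readpages implementation; example_readpages() and the readahead helper it calls are illustrative, not part of this patch:

static int example_readpages(struct fscache_cookie *cookie,
			     struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	/* assume fscache_read_or_alloc_pages() already marked some pages */
	int ret = example_start_readahead(mapping, pages, nr_pages); /* hypothetical */

	if (ret < 0)
		fscache_readpages_cancel(cookie, pages); /* unmark and uncache leftovers */
	return ret;
}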
+/**
* fscache_write_page - Request storage of a page in the cache
* @cookie: The cookie representing the cache object
* @page: The netfs page to store
@@ -592,7 +676,7 @@ int fscache_write_page(struct fscache_cookie *cookie,
struct page *page,
gfp_t gfp)
{
- if (fscache_cookie_valid(cookie))
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
return __fscache_write_page(cookie, page, gfp);
else
return -ENOBUFS;
@@ -702,4 +786,47 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
__fscache_uncache_all_inode_pages(cookie, inode);
}
+/**
+ * fscache_disable_cookie - Disable a cookie
+ * @cookie: The cookie representing the cache object
+ * @invalidate: Invalidate the backing object
+ *
+ * Disable a cookie from accepting further alloc, read, write, invalidate,
+ * update or acquire operations. Outstanding operations can still be waited
+ * upon and pages can still be uncached and the cookie relinquished.
+ *
+ * This will not return until all outstanding operations have completed.
+ *
+ * If @invalidate is set, then the backing object will be invalidated and
+ * detached, otherwise it will just be detached.
+ */
+static inline
+void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ __fscache_disable_cookie(cookie, invalidate);
+}
+
+/**
+ * fscache_enable_cookie - Reenable a cookie
+ * @cookie: The cookie representing the cache object
+ * @can_enable: A function to permit enablement once lock is held
+ * @data: Data for can_enable()
+ *
+ * Reenable a previously disabled cookie, allowing it to accept further alloc,
+ * read, write, invalidate, update or acquire operations. An attempt will be
+ * made to immediately reattach the cookie to a backing object.
+ *
+ * The can_enable() function is called (if not NULL) once the enablement lock
+ * is held to rule on whether enablement is still permitted to go ahead.
+ */
+static inline
+void fscache_enable_cookie(struct fscache_cookie *cookie,
+ bool (*can_enable)(void *data),
+ void *data)
+{
+ if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
+ __fscache_enable_cookie(cookie, can_enable, data);
+}
+
#endif /* _LINUX_FSCACHE_H */
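A speculative example of pairing the new enable/disable calls: a netfs might disable caching while a file has writers and gate re-enablement on the writer count; everything except the fscache calls themselves is hypothetical.

static bool example_can_enable(void *data)
{
	atomic_t *nr_writers = data;

	return atomic_read(nr_writers) == 0;	/* only re-enable with no writers left */
}

static void example_set_cacheable(struct fscache_cookie *cookie,
				  atomic_t *nr_writers, bool cacheable)
{
	if (cacheable)
		fscache_enable_cookie(cookie, example_can_enable, nr_writers);
	else
		fscache_disable_cookie(cookie, false);	/* detach, don't invalidate */
}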
diff --git a/include/linux/fsl/mxs-dma.h b/include/linux/fsl/mxs-dma.h
deleted file mode 100644
index 55d870238399..000000000000
--- a/include/linux/fsl/mxs-dma.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MACH_MXS_DMA_H__
-#define __MACH_MXS_DMA_H__
-
-#include <linux/dmaengine.h>
-
-struct mxs_dma_data {
- int chan_irq;
-};
-
-extern int mxs_dma_is_apbh(struct dma_chan *chan);
-extern int mxs_dma_is_apbx(struct dma_chan *chan);
-#endif /* __MACH_MXS_DMA_H__ */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9f15c0064c50..31ea4b428360 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -533,11 +533,11 @@ static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
-static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
return -EINVAL;
}
-static inline int unregister_ftrace_command(char *cmd_name)
+static inline __init int unregister_ftrace_command(char *cmd_name)
{
return -EINVAL;
}
@@ -721,6 +721,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];
+#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 5eaa746735ff..8c9b7a1c4138 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -202,6 +202,7 @@ enum {
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
TRACE_EVENT_FL_WAS_ENABLED_BIT,
+ TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
};
/*
@@ -213,6 +214,7 @@ enum {
* WAS_ENABLED - Set and stays set when an event was ever enabled
* (used for module unloading, if a module event is enabled,
* it is best to clear the buffers that used it).
+ * USE_CALL_FILTER - For ftrace internal events, don't use file filter
*/
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -220,6 +222,7 @@ enum {
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
+ TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
};
struct ftrace_event_call {
@@ -238,12 +241,16 @@ struct ftrace_event_call {
* bit 2: failed to apply filter
* bit 3: ftrace internal event (do not enable)
* bit 4: Event was enabled by module
+ * bit 5: use call filter rather than file filter
*/
int flags; /* static flags of different events */
#ifdef CONFIG_PERF_EVENTS
int perf_refcount;
struct hlist_head __percpu *perf_events;
+
+ int (*perf_perm)(struct ftrace_event_call *,
+ struct perf_event *);
#endif
};
@@ -253,6 +260,8 @@ struct ftrace_subsystem_dir;
enum {
FTRACE_EVENT_FL_ENABLED_BIT,
FTRACE_EVENT_FL_RECORDED_CMD_BIT,
+ FTRACE_EVENT_FL_FILTERED_BIT,
+ FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
FTRACE_EVENT_FL_SOFT_MODE_BIT,
FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
};
@@ -261,6 +270,8 @@ enum {
* Ftrace event file flags:
* ENABLED - The event is enabled
* RECORDED_CMD - The comms should be recorded at sched_switch
+ * FILTERED - The event has a filter attached
+ * NO_SET_FILTER - Set when filter has error and is to be ignored
* SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
* SOFT_DISABLED - When set, do not trace the event (even though its
* tracepoint may be enabled)
@@ -268,6 +279,8 @@ enum {
enum {
FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
+ FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
+ FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
};
@@ -275,6 +288,7 @@ enum {
struct ftrace_event_file {
struct list_head list;
struct ftrace_event_call *event_call;
+ struct event_filter *filter;
struct dentry *dir;
struct trace_array *tr;
struct ftrace_subsystem_dir *system;
@@ -306,16 +320,33 @@ struct ftrace_event_file {
} \
early_initcall(trace_init_flags_##name);
+#define __TRACE_EVENT_PERF_PERM(name, expr...) \
+ static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+ struct perf_event *p_event) \
+ { \
+ return ({ expr; }); \
+ } \
+ static int __init trace_init_perf_perm_##name(void) \
+ { \
+ event_##name.perf_perm = &perf_perm_##name; \
+ return 0; \
+ } \
+ early_initcall(trace_init_perf_perm_##name);
+
#define PERF_MAX_TRACE_SIZE 2048
#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
-extern void destroy_preds(struct ftrace_event_call *call);
+extern void destroy_preds(struct ftrace_event_file *file);
+extern void destroy_call_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_current_check_discard(struct ring_buffer *buffer,
- struct ftrace_event_call *call,
- void *rec,
- struct ring_buffer_event *event);
+
+extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+ struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
+extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+ struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
enum {
FILTER_OTHER = 0,
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 661d374aeb2d..1eda33d7cb10 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -66,8 +66,8 @@ struct gen_pool_chunk {
struct list_head next_chunk; /* next chunk in pool */
atomic_t avail;
phys_addr_t phys_addr; /* physical starting address of memory chunk */
- unsigned long start_addr; /* starting address of memory chunk */
- unsigned long end_addr; /* ending address of memory chunk */
+ unsigned long start_addr; /* start address of memory chunk */
+ unsigned long end_addr; /* end address of memory chunk (inclusive) */
unsigned long bits[0]; /* bitmap for allocating memory chunk */
};
@@ -94,6 +94,8 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
}
extern void gen_pool_destroy(struct gen_pool *);
extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
+extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma);
extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
extern void gen_pool_for_each_chunk(struct gen_pool *,
void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
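A short sketch of the new DMA-aware allocator; the pool is assumed to have been created and populated elsewhere (gen_pool_create()/gen_pool_add()):

dma_addr_t dma;
void *vaddr = gen_pool_dma_alloc(pool, 256, &dma);

if (!vaddr)
	return -ENOMEM;
/* hand "dma" to the device, use "vaddr" from the CPU */
gen_pool_free(pool, (unsigned long)vaddr, 256);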
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 023bc346b877..c0894dd8827b 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -273,49 +273,40 @@ static struct genl_family ZZZ_genl_family __read_mostly = {
* Magic: define multicast groups
* Magic: define multicast group registration helper
*/
+#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps)
+static const struct genl_multicast_group ZZZ_genl_mcgrps[] = {
+#undef GENL_mc_group
+#define GENL_mc_group(group) { .name = #group, },
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
+enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) {
+#undef GENL_mc_group
+#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group),
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
#undef GENL_mc_group
#define GENL_mc_group(group) \
-static struct genl_multicast_group \
-CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group) __read_mostly = { \
- .name = #group, \
-}; \
static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
struct sk_buff *skb, gfp_t flags) \
{ \
unsigned int group_id = \
- CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id; \
- if (!group_id) \
- return -EINVAL; \
- return genlmsg_multicast(skb, 0, group_id, flags); \
+ CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \
+ return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \
+ group_id, flags); \
}
#include GENL_MAGIC_INCLUDE_FILE
-int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
-{
- int err = genl_register_family_with_ops(&ZZZ_genl_family,
- ZZZ_genl_ops, ARRAY_SIZE(ZZZ_genl_ops));
- if (err)
- return err;
-#undef GENL_mc_group
-#define GENL_mc_group(group) \
- err = genl_register_mc_group(&ZZZ_genl_family, \
- &CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group)); \
- if (err) \
- goto fail; \
- else \
- pr_info("%s: mcg %s: %u\n", #group, \
- __stringify(GENL_MAGIC_FAMILY), \
- CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id);
-
-#include GENL_MAGIC_INCLUDE_FILE
-
#undef GENL_mc_group
#define GENL_mc_group(group)
- return 0;
-fail:
- genl_unregister_family(&ZZZ_genl_family);
- return err;
+
+int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
+{
+ return genl_register_family_with_ops_groups(&ZZZ_genl_family, \
+ ZZZ_genl_ops, \
+ ZZZ_genl_mcgrps);
}
void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 552e3f46e4a3..13dfd24d01ab 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -16,14 +16,17 @@
#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
+/* Gpio pin is active-low */
+#define GPIOF_ACTIVE_LOW (1 << 2)
+
/* Gpio pin is open drain */
-#define GPIOF_OPEN_DRAIN (1 << 2)
+#define GPIOF_OPEN_DRAIN (1 << 3)
/* Gpio pin is open source */
-#define GPIOF_OPEN_SOURCE (1 << 3)
+#define GPIOF_OPEN_SOURCE (1 << 4)
-#define GPIOF_EXPORT (1 << 4)
-#define GPIOF_EXPORT_CHANGEABLE (1 << 5)
+#define GPIOF_EXPORT (1 << 5)
+#define GPIOF_EXPORT_CHANGEABLE (1 << 6)
#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
@@ -74,12 +77,22 @@ static inline int irq_to_gpio(unsigned int irq)
#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
+/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */
+
+struct device;
+
+int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
+int devm_gpio_request_one(struct device *dev, unsigned gpio,
+ unsigned long flags, const char *label);
+void devm_gpio_free(struct device *dev, unsigned int gpio);
+
#else /* ! CONFIG_GPIOLIB */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
+#include <linux/pinctrl/pinctrl.h>
struct device;
struct gpio_chip;
@@ -204,6 +217,18 @@ static inline int gpio_to_irq(unsigned gpio)
return -EINVAL;
}
+static inline int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline void gpio_unlock_as_irq(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ WARN_ON(1);
+}
+
static inline int irq_to_gpio(unsigned irq)
{
/* irq can never have been returned from gpio_to_irq() */
@@ -220,20 +245,40 @@ gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
return -EINVAL;
}
+static inline int
+gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
static inline void
gpiochip_remove_pin_ranges(struct gpio_chip *chip)
{
WARN_ON(1);
}
-#endif /* ! CONFIG_GPIOLIB */
+static inline int devm_gpio_request(struct device *dev, unsigned gpio,
+ const char *label)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
-struct device;
+static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
+ unsigned long flags, const char *label)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
-/* bindings for managed devices that want to request gpios */
-int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
-int devm_gpio_request_one(struct device *dev, unsigned gpio,
- unsigned long flags, const char *label);
-void devm_gpio_free(struct device *dev, unsigned int gpio);
+static inline void devm_gpio_free(struct device *dev, unsigned int gpio)
+{
+ WARN_ON(1);
+}
+
+#endif /* ! CONFIG_GPIOLIB */
#endif /* __LINUX_GPIO_H */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
new file mode 100644
index 000000000000..4d34dbbbad4d
--- /dev/null
+++ b/include/linux/gpio/consumer.h
@@ -0,0 +1,253 @@
+#ifndef __LINUX_GPIO_CONSUMER_H
+#define __LINUX_GPIO_CONSUMER_H
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+
+#ifdef CONFIG_GPIOLIB
+
+struct device;
+struct gpio_chip;
+
+/**
+ * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
+ * preferable to the old integer-based handles.
+ *
+ * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid
+ * until the GPIO is released.
+ */
+struct gpio_desc;
+
+/* Acquire and dispose GPIOs */
+struct gpio_desc *__must_check gpiod_get(struct device *dev,
+ const char *con_id);
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx);
+void gpiod_put(struct gpio_desc *desc);
+
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
+ const char *con_id);
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx);
+void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
+
+int gpiod_get_direction(const struct gpio_desc *desc);
+int gpiod_direction_input(struct gpio_desc *desc);
+int gpiod_direction_output(struct gpio_desc *desc, int value);
+
+/* Value get/set from non-sleeping context */
+int gpiod_get_value(const struct gpio_desc *desc);
+void gpiod_set_value(struct gpio_desc *desc, int value);
+int gpiod_get_raw_value(const struct gpio_desc *desc);
+void gpiod_set_raw_value(struct gpio_desc *desc, int value);
+
+/* Value get/set from sleeping context */
+int gpiod_get_value_cansleep(const struct gpio_desc *desc);
+void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
+int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
+void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
+
+int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+
+int gpiod_is_active_low(const struct gpio_desc *desc);
+int gpiod_cansleep(const struct gpio_desc *desc);
+
+int gpiod_to_irq(const struct gpio_desc *desc);
+
+/* Convert between the old gpio_ and new gpiod_ interfaces */
+struct gpio_desc *gpio_to_desc(unsigned gpio);
+int desc_to_gpio(const struct gpio_desc *desc);
+struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
+
+#else /* CONFIG_GPIOLIB */
+
+static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
+ const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline void gpiod_put(struct gpio_desc *desc)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+static inline struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
+ const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+
+static inline int gpiod_get_direction(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+static inline int gpiod_direction_input(struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+
+
+static inline int gpiod_get_value(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+static inline void gpiod_set_value(struct gpio_desc *desc, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
+ int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+
+static inline int gpiod_is_active_low(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+static inline int gpiod_cansleep(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int gpiod_to_irq(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline int desc_to_gpio(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -EINVAL;
+}
+static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return ERR_PTR(-ENODEV);
+}
+
+
+#endif /* CONFIG_GPIOLIB */
+
+#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
+
+int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
+int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc);
+int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
+void gpiod_unexport(struct gpio_desc *desc);
+
+#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+
+static inline int gpiod_export(struct gpio_desc *desc,
+ bool direction_may_change)
+{
+ return -ENOSYS;
+}
+
+static inline int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc)
+{
+ return -ENOSYS;
+}
+
+static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
+{
+ return -ENOSYS;
+}
+
+static inline void gpiod_unexport(struct gpio_desc *desc)
+{
+}
+
+#endif /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+
+#endif
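A hedged consumer-side sketch of the descriptor API declared above; the "reset" connection ID and the probe scaffolding are illustrative only:

static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get(&pdev->dev, "reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_direction_output(reset, 0);
	gpiod_set_value(reset, 1);	/* logical value; polarity handled by gpiolib */
	return 0;
}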
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
new file mode 100644
index 000000000000..3ea2cf6b0e6c
--- /dev/null
+++ b/include/linux/gpio/driver.h
@@ -0,0 +1,194 @@
+#ifndef __LINUX_GPIO_DRIVER_H
+#define __LINUX_GPIO_DRIVER_H
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+struct device;
+struct gpio_desc;
+struct of_phandle_args;
+struct device_node;
+struct seq_file;
+
+/**
+ * struct gpio_chip - abstract a GPIO controller
+ * @label: for diagnostics
+ * @dev: optional device providing the GPIOs
+ * @owner: helps prevent removal of modules exporting active GPIOs
+ * @list: links gpio_chips together for traversal
+ * @request: optional hook for chip-specific activation, such as
+ * enabling module power and clock; may sleep
+ * @free: optional hook for chip-specific deactivation, such as
+ * disabling module power and clock; may sleep
+ * @get_direction: returns direction for signal "offset", 0=out, 1=in,
+ * (same as GPIOF_DIR_XXX), or negative error
+ * @direction_input: configures signal "offset" as input, or returns error
+ * @direction_output: configures signal "offset" as output, or returns error
+ * @get: returns value for signal "offset"; for output signals this
+ * returns either the value actually sensed, or zero
+ * @set: assigns output value for signal "offset"
+ * @set_debounce: optional hook for setting debounce time for specified gpio in
+ * interrupt triggered gpio chips
+ * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
+ * implementation may not sleep
+ * @dbg_show: optional routine to show contents in debugfs; default code
+ * will be used when this is omitted, but custom code can show extra
+ * state (such as pullup/pulldown configuration).
+ * @base: identifies the first GPIO number handled by this chip; or, if
+ * negative during registration, requests dynamic ID allocation.
+ * @ngpio: the number of GPIOs handled by this controller; the last GPIO
+ * handled is (base + ngpio - 1).
+ * @desc: array of ngpio descriptors. Private.
+ * @can_sleep: flag must be set iff get()/set() methods sleep, as they
+ * must while accessing GPIO expander chips over I2C or SPI
+ * @names: if set, must be an array of strings to use as alternative
+ * names for the GPIOs in this chip. Any entry in the array
+ * may be NULL if there is no alias for the GPIO, however the
+ * array must be @ngpio entries long. A name can include a single printk
+ * format specifier for an unsigned int. It is substituted by the actual
+ * number of the gpio.
+ *
+ * A gpio_chip can help platforms abstract various sources of GPIOs so
+ * they can all be accessed through a common programming interface.
+ * Example sources would be SOC controllers, FPGAs, multifunction
+ * chips, dedicated GPIO expanders, and so on.
+ *
+ * Each chip controls a number of signals, identified in method calls
+ * by "offset" values in the range 0..(@ngpio - 1). When those signals
+ * are referenced through calls like gpio_get_value(gpio), the offset
+ * is calculated by subtracting @base from the gpio number.
+ */
+struct gpio_chip {
+ const char *label;
+ struct device *dev;
+ struct module *owner;
+ struct list_head list;
+
+ int (*request)(struct gpio_chip *chip,
+ unsigned offset);
+ void (*free)(struct gpio_chip *chip,
+ unsigned offset);
+ int (*get_direction)(struct gpio_chip *chip,
+ unsigned offset);
+ int (*direction_input)(struct gpio_chip *chip,
+ unsigned offset);
+ int (*direction_output)(struct gpio_chip *chip,
+ unsigned offset, int value);
+ int (*get)(struct gpio_chip *chip,
+ unsigned offset);
+ void (*set)(struct gpio_chip *chip,
+ unsigned offset, int value);
+ int (*set_debounce)(struct gpio_chip *chip,
+ unsigned offset,
+ unsigned debounce);
+
+ int (*to_irq)(struct gpio_chip *chip,
+ unsigned offset);
+
+ void (*dbg_show)(struct seq_file *s,
+ struct gpio_chip *chip);
+ int base;
+ u16 ngpio;
+ struct gpio_desc *desc;
+ const char *const *names;
+ unsigned can_sleep:1;
+ unsigned exported:1;
+
+#if defined(CONFIG_OF_GPIO)
+ /*
+ * If CONFIG_OF is enabled, then all GPIO controllers described in the
+ * device tree may automatically have an OF translation
+ */
+ struct device_node *of_node;
+ int of_gpio_n_cells;
+ int (*of_xlate)(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags);
+#endif
+#ifdef CONFIG_PINCTRL
+ /*
+ * If CONFIG_PINCTRL is enabled, then gpio controllers can optionally
+ * describe the actual pin range which they serve in an SoC. This
+ * information would be used by pinctrl subsystem to configure
+ * corresponding pins for gpio usage.
+ */
+ struct list_head pin_ranges;
+#endif
+};
+
+extern const char *gpiochip_is_requested(struct gpio_chip *chip,
+ unsigned offset);
+
+/* add/remove chips */
+extern int gpiochip_add(struct gpio_chip *chip);
+extern int __must_check gpiochip_remove(struct gpio_chip *chip);
+extern struct gpio_chip *gpiochip_find(void *data,
+ int (*match)(struct gpio_chip *chip, void *data));
+
+/* lock/unlock as IRQ */
+int gpiod_lock_as_irq(struct gpio_desc *desc);
+void gpiod_unlock_as_irq(struct gpio_desc *desc);
+
+enum gpio_lookup_flags {
+ GPIO_ACTIVE_HIGH = (0 << 0),
+ GPIO_ACTIVE_LOW = (1 << 0),
+ GPIO_OPEN_DRAIN = (1 << 1),
+ GPIO_OPEN_SOURCE = (1 << 2),
+};
+
+/**
+ * Lookup table for associating GPIOs with specific devices and functions using
+ * platform data.
+ */
+struct gpiod_lookup {
+ struct list_head list;
+ /*
+ * name of the chip the GPIO belongs to
+ */
+ const char *chip_label;
+ /*
+ * hardware number (i.e. relative to the chip) of the GPIO
+ */
+ u16 chip_hwnum;
+ /*
+ * name of device that can claim this GPIO
+ */
+ const char *dev_id;
+ /*
+ * name of the GPIO from the device's point of view
+ */
+ const char *con_id;
+ /*
+ * index of the GPIO in case several GPIOs share the same name
+ */
+ unsigned int idx;
+ /*
+ * mask of GPIO_* values
+ */
+ enum gpio_lookup_flags flags;
+};
+
+/*
+ * Simple definition of a single GPIO under a con_id
+ */
+#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _dev_id, _con_id, _flags) \
+ GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _dev_id, _con_id, 0, _flags)
+
+/*
+ * Use this macro if you need to have several GPIOs under the same con_id.
+ * Each GPIO needs to use a different index and can be accessed using
+ * gpiod_get_index()
+ */
+#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _dev_id, _con_id, _idx, \
+ _flags) \
+{ \
+ .chip_label = _chip_label, \
+ .chip_hwnum = _chip_hwnum, \
+ .dev_id = _dev_id, \
+ .con_id = _con_id, \
+ .idx = _idx, \
+ .flags = _flags, \
+}
+
+void gpiod_add_table(struct gpiod_lookup *table, size_t size);
+
+#endif
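A board-file sketch of the lookup table, using only the macros and gpiod_add_table() declared above; the chip label, hardware number and device name are invented:

static struct gpiod_lookup example_gpios[] = {
	GPIO_LOOKUP("gpio-bank0", 15, "example-device.0", "reset", GPIO_ACTIVE_LOW),
};

static void __init example_board_init(void)
{
	gpiod_add_table(example_gpios, ARRAY_SIZE(example_gpios));
}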
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index ccfe17c5c8da..d9cf963ac832 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -7,11 +7,7 @@
#include <linux/vtime.h>
-#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);
-#else
-# define synchronize_irq(irq) barrier()
-#endif
#if defined(CONFIG_TINY_RCU)
@@ -37,7 +33,7 @@ extern void rcu_nmi_exit(void);
#define __irq_enter() \
do { \
account_irq_enter_time(current); \
- add_preempt_count(HARDIRQ_OFFSET); \
+ preempt_count_add(HARDIRQ_OFFSET); \
trace_hardirq_enter(); \
} while (0)
@@ -53,7 +49,7 @@ extern void irq_enter(void);
do { \
trace_hardirq_exit(); \
account_irq_exit_time(current); \
- sub_preempt_count(HARDIRQ_OFFSET); \
+ preempt_count_sub(HARDIRQ_OFFSET); \
} while (0)
/*
@@ -66,7 +62,7 @@ extern void irq_exit(void);
lockdep_off(); \
ftrace_nmi_enter(); \
BUG_ON(in_nmi()); \
- add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+ preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
rcu_nmi_enter(); \
trace_hardirq_enter(); \
} while (0)
@@ -76,7 +72,7 @@ extern void irq_exit(void);
trace_hardirq_exit(); \
rcu_nmi_exit(); \
BUG_ON(!in_nmi()); \
- sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
+ preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
ftrace_nmi_exit(); \
lockdep_on(); \
} while (0)
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
index a9df51f5d54c..519b6e2d769e 100644
--- a/include/linux/hashtable.h
+++ b/include/linux/hashtable.h
@@ -174,6 +174,21 @@ static inline void hash_del_rcu(struct hlist_node *node)
member)
/**
+ * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
+ * to the same bucket in an rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ *
+ * This is the same as hash_for_each_possible_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
+ hlist_for_each_entry_rcu_notrace(obj, \
+ &name[hash_min(key, HASH_BITS(name))], member)
+
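An illustrative (non-patch) use of the notrace iterator, for example from code that must avoid RCU tracing because it implements the tracing machinery itself; the entry type and table are made up, and the caller is assumed to already be in a suitable RCU read-side section:

struct example_entry {
	int key;
	struct hlist_node node;
};
static DEFINE_HASHTABLE(example_table, 6);

struct example_entry *e;

hash_for_each_possible_rcu_notrace(example_table, e, node, key)
	if (e->key == key)
		break;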
+/**
* hash_for_each_possible_safe - iterate over all possible objects hashing to the
* same bucket safe against removals
* @name: hashtable to iterate
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 32ba45158d39..b914ca3f57ba 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -21,6 +21,8 @@
#include <linux/hid.h>
#include <linux/hid-sensor-ids.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
/**
* struct hid_sensor_hub_attribute_info - Attribute info
@@ -40,6 +42,8 @@ struct hid_sensor_hub_attribute_info {
s32 units;
s32 unit_expo;
s32 size;
+ s32 logical_minimum;
+ s32 logical_maximum;
};
/**
@@ -47,11 +51,13 @@ struct hid_sensor_hub_attribute_info {
* @hdev: Stores the hid instance.
* @vendor_id: Vendor id of hub device.
* @product_id: Product id of hub device.
+ * @ref_cnt: Number of MFD clients have opened this device
*/
struct hid_sensor_hub_device {
struct hid_device *hdev;
u32 vendor_id;
u32 product_id;
+ int ref_cnt;
};
/**
@@ -74,6 +80,22 @@ struct hid_sensor_hub_callbacks {
void *priv);
};
+/**
+* sensor_hub_device_open() - Open hub device
+* @hsdev: Hub device instance.
+*
+* Used to open the hid device for the sensor hub.
+*/
+int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev);
+
+/**
+* sensor_hub_device_close() - Close hub device
+* @hsdev: Hub device instance.
+*
+* Used to close the hid device for the sensor hub.
+*/
+void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev);
+
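A speculative client-side pairing of the open/close calls, following the usual hid-sensor-hub convention of passing the hub device via platform data; the probe/remove scaffolding is illustrative:

static int example_sensor_probe(struct platform_device *pdev)
{
	struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;

	return sensor_hub_device_open(hsdev);	/* balanced by close in remove */
}

static int example_sensor_remove(struct platform_device *pdev)
{
	sensor_hub_device_close(pdev->dev.platform_data);
	return 0;
}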
/* Registration functions */
/**
@@ -166,6 +188,7 @@ struct hid_sensor_common {
struct platform_device *pdev;
unsigned usage_id;
bool data_ready;
+ struct iio_trigger *trigger;
struct hid_sensor_hub_attribute_info poll;
struct hid_sensor_hub_attribute_info report_state;
struct hid_sensor_hub_attribute_info power_state;
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 4f945d3ed49f..8323775ac21d 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -117,4 +117,16 @@
#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316
#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319
+/* Power state enumerations */
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x00
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x01
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D1_LOW_POWER_ENUM 0x02
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D2_STANDBY_WITH_WAKE_ENUM 0x03
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D3_SLEEP_WITH_WAKE_ENUM 0x04
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM 0x05
+
+/* Report State enumerations */
+#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x00
+#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x01
+
#endif
diff --git a/include/linux/hid.h b/include/linux/hid.h
index ee1ffc5e19c9..31b9d299ef6c 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -756,6 +756,10 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+struct hid_report *hid_validate_values(struct hid_device *hid,
+ unsigned int type, unsigned int id,
+ unsigned int field_index,
+ unsigned int report_counts);
int hid_open_report(struct hid_device *device);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index f148e4908410..8ec23fb0b412 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -31,11 +31,11 @@ struct hippi_cb {
__u32 ifield;
};
-extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-extern int hippi_change_mtu(struct net_device *dev, int new_mtu);
-extern int hippi_mac_addr(struct net_device *dev, void *p);
-extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
-extern struct net_device *alloc_hippi_dev(int sizeof_priv);
+__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int hippi_change_mtu(struct net_device *dev, int new_mtu);
+int hippi_mac_addr(struct net_device *dev, void *p);
+int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
+struct net_device *alloc_hippi_dev(int sizeof_priv);
#endif
#endif /* _LINUX_HIPPIDEVICE_H */
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
new file mode 100644
index 000000000000..f5b9b87ac9a9
--- /dev/null
+++ b/include/linux/host1x.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __LINUX_HOST1X_H
+#define __LINUX_HOST1X_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+enum host1x_class {
+ HOST1X_CLASS_HOST1X = 0x1,
+ HOST1X_CLASS_GR2D = 0x51,
+ HOST1X_CLASS_GR2D_SB = 0x52,
+ HOST1X_CLASS_GR3D = 0x60,
+};
+
+struct host1x_client;
+
+struct host1x_client_ops {
+ int (*init)(struct host1x_client *client);
+ int (*exit)(struct host1x_client *client);
+};
+
+struct host1x_client {
+ struct list_head list;
+ struct device *parent;
+ struct device *dev;
+
+ const struct host1x_client_ops *ops;
+
+ enum host1x_class class;
+ struct host1x_channel *channel;
+
+ struct host1x_syncpt **syncpts;
+ unsigned int num_syncpts;
+};
+
+/*
+ * host1x buffer objects
+ */
+
+struct host1x_bo;
+struct sg_table;
+
+struct host1x_bo_ops {
+ struct host1x_bo *(*get)(struct host1x_bo *bo);
+ void (*put)(struct host1x_bo *bo);
+ dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
+ void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+ void *(*mmap)(struct host1x_bo *bo);
+ void (*munmap)(struct host1x_bo *bo, void *addr);
+ void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
+ void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
+};
+
+struct host1x_bo {
+ const struct host1x_bo_ops *ops;
+};
+
+static inline void host1x_bo_init(struct host1x_bo *bo,
+ const struct host1x_bo_ops *ops)
+{
+ bo->ops = ops;
+}
+
+static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
+{
+ return bo->ops->get(bo);
+}
+
+static inline void host1x_bo_put(struct host1x_bo *bo)
+{
+ bo->ops->put(bo);
+}
+
+static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
+ struct sg_table **sgt)
+{
+ return bo->ops->pin(bo, sgt);
+}
+
+static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+ bo->ops->unpin(bo, sgt);
+}
+
+static inline void *host1x_bo_mmap(struct host1x_bo *bo)
+{
+ return bo->ops->mmap(bo);
+}
+
+static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+ bo->ops->munmap(bo, addr);
+}
+
+static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
+{
+ return bo->ops->kmap(bo, pagenum);
+}
+
+static inline void host1x_bo_kunmap(struct host1x_bo *bo,
+ unsigned int pagenum, void *addr)
+{
+ bo->ops->kunmap(bo, pagenum, addr);
+}
+
+/*
+ * host1x syncpoints
+ */
+
+#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0)
+#define HOST1X_SYNCPT_HAS_BASE (1 << 1)
+
+struct host1x_syncpt_base;
+struct host1x_syncpt;
+struct host1x;
+
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+u32 host1x_syncpt_id(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
+int host1x_syncpt_incr(struct host1x_syncpt *sp);
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
+ u32 *value);
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+ unsigned long flags);
+void host1x_syncpt_free(struct host1x_syncpt *sp);
+
+struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
+u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
+
+/*
+ * host1x channel
+ */
+
+struct host1x_channel;
+struct host1x_job;
+
+struct host1x_channel *host1x_channel_request(struct device *dev);
+void host1x_channel_free(struct host1x_channel *channel);
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_put(struct host1x_channel *channel);
+int host1x_job_submit(struct host1x_job *job);
+
+/*
+ * host1x job
+ */
+
+struct host1x_reloc {
+ struct host1x_bo *cmdbuf;
+ u32 cmdbuf_offset;
+ struct host1x_bo *target;
+ u32 target_offset;
+ u32 shift;
+ u32 pad;
+};
+
+struct host1x_job {
+ /* When refcount goes to zero, job can be freed */
+ struct kref ref;
+
+ /* List entry */
+ struct list_head list;
+
+ /* Channel where job is submitted to */
+ struct host1x_channel *channel;
+
+ u32 client;
+
+ /* Gathers and their memory */
+ struct host1x_job_gather *gathers;
+ unsigned int num_gathers;
+
+ /* Wait checks to be processed at submit time */
+ struct host1x_waitchk *waitchk;
+ unsigned int num_waitchk;
+ u32 waitchk_mask;
+
+ /* Array of handles to be pinned & unpinned */
+ struct host1x_reloc *relocarray;
+ unsigned int num_relocs;
+ struct host1x_job_unpin_data *unpins;
+ unsigned int num_unpins;
+
+ dma_addr_t *addr_phys;
+ dma_addr_t *gather_addr_phys;
+ dma_addr_t *reloc_addr_phys;
+
+ /* Sync point id, number of increments and end related to the submit */
+ u32 syncpt_id;
+ u32 syncpt_incrs;
+ u32 syncpt_end;
+
+ /* Maximum time to wait for this job */
+ unsigned int timeout;
+
+ /* Index and number of slots used in the push buffer */
+ unsigned int first_get;
+ unsigned int num_slots;
+
+ /* Copy of gathers */
+ size_t gather_copy_size;
+ dma_addr_t gather_copy;
+ u8 *gather_copy_mapped;
+
+ /* Check if register is marked as an address reg */
+ int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
+
+ /* Request a SETCLASS to this class */
+ u32 class;
+
+ /* Add a channel wait for previous ops to complete */
+ bool serialize;
+};
+
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+ u32 num_cmdbufs, u32 num_relocs,
+ u32 num_waitchks);
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
+ u32 words, u32 offset);
+struct host1x_job *host1x_job_get(struct host1x_job *job);
+void host1x_job_put(struct host1x_job *job);
+int host1x_job_pin(struct host1x_job *job, struct device *dev);
+void host1x_job_unpin(struct host1x_job *job);
+
+/*
+ * subdevice probe infrastructure
+ */
+
+struct host1x_device;
+
+struct host1x_driver {
+ const struct of_device_id *subdevs;
+ struct list_head list;
+ const char *name;
+
+ int (*probe)(struct host1x_device *device);
+ int (*remove)(struct host1x_device *device);
+};
+
+int host1x_driver_register(struct host1x_driver *driver);
+void host1x_driver_unregister(struct host1x_driver *driver);
+
+struct host1x_device {
+ struct host1x_driver *driver;
+ struct list_head list;
+ struct device dev;
+
+ struct mutex subdevs_lock;
+ struct list_head subdevs;
+ struct list_head active;
+
+ struct mutex clients_lock;
+ struct list_head clients;
+};
+
+static inline struct host1x_device *to_host1x_device(struct device *dev)
+{
+ return container_of(dev, struct host1x_device, dev);
+}
+
+int host1x_device_init(struct host1x_device *device);
+int host1x_device_exit(struct host1x_device *device);
+
+int host1x_client_register(struct host1x_client *client);
+int host1x_client_unregister(struct host1x_client *client);
+
+#endif
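A rough sketch of registering a host1x client from a sub-device driver; only the structures and functions declared in this header are relied on, and the probe scaffolding is hypothetical:

static int example_client_init(struct host1x_client *client)
{
	/* called once the host1x parent is ready; acquire channels/syncpts here */
	return 0;
}

static int example_client_exit(struct host1x_client *client)
{
	return 0;
}

static const struct host1x_client_ops example_client_ops = {
	.init = example_client_init,
	.exit = example_client_exit,
};

static int example_probe(struct platform_device *pdev)
{
	struct host1x_client *client;

	client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	client->ops = &example_client_ops;
	client->dev = &pdev->dev;

	return host1x_client_register(client);
}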
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b60de92e2edc..91672e2deec3 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -54,7 +54,8 @@ enum page_check_address_pmd_flag {
extern pmd_t *page_check_address_pmd(struct page *page,
struct mm_struct *mm,
unsigned long address,
- enum page_check_address_pmd_flag flag);
+ enum page_check_address_pmd_flag flag,
+ spinlock_t **ptl);
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
@@ -96,9 +97,6 @@ extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end);
-extern int handle_pte_fault(struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
@@ -132,15 +130,15 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next);
-extern int __pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma);
+extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+ spinlock_t **ptl);
/* mmap_sem must be held on entry */
-static inline int pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma)
+static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+ spinlock_t **ptl)
{
VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
if (pmd_trans_huge(*pmd))
- return __pmd_trans_huge_lock(pmd, vma);
+ return __pmd_trans_huge_lock(pmd, vma, ptl);
else
return 0;
}
@@ -218,8 +216,8 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
long adjust_next)
{
}
-static inline int pmd_trans_huge_lock(pmd_t *pmd,
- struct vm_area_struct *vma)
+static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+ spinlock_t **ptl)
{
return 0;
}
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c2b1801a160b..bd7e98752222 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);
int PageHuge(struct page *page);
+int PageHeadHuge(struct page *page_head);
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@@ -66,7 +67,9 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
-void copy_huge_page(struct page *dst, struct page *src);
+bool isolate_huge_page(struct page *page, struct list_head *list);
+void putback_active_hugepage(struct page *page);
+bool is_hugepage_active(struct page *page);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -101,6 +104,11 @@ static inline int PageHuge(struct page *page)
return 0;
}
+static inline int PageHeadHuge(struct page *page_head)
+{
+ return 0;
+}
+
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@@ -134,9 +142,12 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
return 0;
}
-static inline void copy_huge_page(struct page *dst, struct page *src)
+static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
+ return false;
}
+#define putback_active_hugepage(p) do {} while (0)
+#define is_hugepage_active(x) false
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
@@ -261,6 +272,8 @@ struct huge_bootmem_page {
};
struct page *alloc_huge_page_node(struct hstate *h, int nid);
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve);
/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);
@@ -371,14 +384,38 @@ static inline pgoff_t basepage_index(struct page *page)
return __basepage_index(page);
}
+extern void dissolve_free_huge_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
+int pmd_huge_support(void);
+/*
+ * Currently hugepage migration is enabled only for pmd-based hugepages.
+ * This function will be updated when hugepage migration is more widely
+ * supported.
+ */
+static inline int hugepage_migration_support(struct hstate *h)
+{
+ return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+}
+
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+ struct mm_struct *mm, pte_t *pte)
+{
+ if (huge_page_size(h) == PMD_SIZE)
+ return pmd_lockptr(mm, (pmd_t *) pte);
+ VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
+ return &mm->page_table_lock;
+}
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
+#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
+#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
@@ -396,6 +433,25 @@ static inline pgoff_t basepage_index(struct page *page)
{
return page->index;
}
+#define dissolve_free_huge_pages(s, e) do {} while (0)
+#define pmd_huge_support() 0
+#define hugepage_migration_support(h) 0
+
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+ struct mm_struct *mm, pte_t *pte)
+{
+ return &mm->page_table_lock;
+}
#endif /* CONFIG_HUGETLB_PAGE */
+static inline spinlock_t *huge_pte_lock(struct hstate *h,
+ struct mm_struct *mm, pte_t *pte)
+{
+ spinlock_t *ptl;
+
+ ptl = huge_pte_lockptr(h, mm, pte);
+ spin_lock(ptl);
+ return ptl;
+}
+
#endif /* _LINUX_HUGETLB_H */
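The locking pattern the new helpers are meant to support, in sketch form (h, mm and ptep come from the surrounding fault or page-walker code):

spinlock_t *ptl = huge_pte_lock(h, mm, ptep);

/* inspect or update the huge PTE at ptep under the correct lock */
spin_unlock(ptl);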
diff --git a/include/linux/hwmon-vid.h b/include/linux/hwmon-vid.h
index f346e4d5381c..da0a680e2f6d 100644
--- a/include/linux/hwmon-vid.h
+++ b/include/linux/hwmon-vid.h
@@ -38,7 +38,7 @@ static inline int vid_to_reg(int val, u8 vrm)
return ((val >= 1100) && (val <= 1850) ?
((18499 - val * 10) / 25 + 5) / 10 : -1);
default:
- return -1;
+ return -EINVAL;
}
}
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index b2514f70d591..09354f6c1d63 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -15,9 +15,19 @@
#define _HWMON_H_
struct device;
+struct attribute_group;
struct device *hwmon_device_register(struct device *dev);
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups);
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups);
void hwmon_device_unregister(struct device *dev);
+void devm_hwmon_device_unregister(struct device *dev);
#endif
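A hedged sketch of the new registration helper in a hwmon driver probe; the "example" device name, the drvdata pointer and the attribute-group array are placeholders:

struct device *hwmon_dev;

hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev, "example",
						    data, example_groups);
if (IS_ERR(hwmon_dev))
	return PTR_ERR(hwmon_dev);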
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index a3b8b2e2d244..15da677478dd 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -30,10 +30,13 @@
/*
* Framework version for util services.
*/
+#define UTIL_FW_MINOR 0
+
+#define UTIL_WS2K8_FW_MAJOR 1
+#define UTIL_WS2K8_FW_VERSION (UTIL_WS2K8_FW_MAJOR << 16 | UTIL_FW_MINOR)
#define UTIL_FW_MAJOR 3
-#define UTIL_FW_MINOR 0
-#define UTIL_FW_MAJOR_MINOR (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
+#define UTIL_FW_VERSION (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
/*
@@ -429,15 +432,6 @@ struct hv_ring_buffer_info {
u32 ring_data_startoffset;
};
-struct hv_ring_buffer_debug_info {
- u32 current_interrupt_mask;
- u32 current_read_index;
- u32 current_write_index;
- u32 bytes_avail_toread;
- u32 bytes_avail_towrite;
-};
-
-
/*
*
* hv_get_ringbuffer_availbytes()
@@ -899,23 +893,6 @@ enum vmbus_channel_state {
CHANNEL_OPENED_STATE,
};
-struct vmbus_channel_debug_info {
- u32 relid;
- enum vmbus_channel_state state;
- uuid_le interfacetype;
- uuid_le interface_instance;
- u32 monitorid;
- u32 servermonitor_pending;
- u32 servermonitor_latency;
- u32 servermonitor_connectionid;
- u32 clientmonitor_pending;
- u32 clientmonitor_latency;
- u32 clientmonitor_connectionid;
-
- struct hv_ring_buffer_debug_info inbound;
- struct hv_ring_buffer_debug_info outbound;
-};
-
/*
* Represents each channel msg on the vmbus connection This is a
* variable-size data structure depending on the msg type itself
@@ -1181,19 +1158,8 @@ extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
u64 *requestid);
-extern void vmbus_get_debug_info(struct vmbus_channel *channel,
- struct vmbus_channel_debug_info *debug);
-
extern void vmbus_ontimer(unsigned long data);
-struct hv_dev_port_info {
- u32 int_mask;
- u32 read_idx;
- u32 write_idx;
- u32 bytes_avail_toread;
- u32 bytes_avail_towrite;
-};
-
/* Base driver object */
struct hv_driver {
const char *name;
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 2ab11dc38077..eff50e062be8 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -205,7 +205,6 @@ struct i2c_driver {
* @name: Indicates the type of the device, usually a chip name that's
* generic enough to hide second-sourcing and compatible revisions.
* @adapter: manages the bus segment hosting this I2C device
- * @driver: device's driver, hence pointer to access routines
* @dev: Driver model device node for the slave.
* @irq: indicates the IRQ generated by this device (if any)
* @detected: member of an i2c_driver.clients list or i2c-core's
@@ -222,7 +221,6 @@ struct i2c_client {
/* _LOWER_ 7 bits */
char name[I2C_NAME_SIZE];
struct i2c_adapter *adapter; /* the adapter we sit on */
- struct i2c_driver *driver; /* and our access routines */
struct device dev; /* the device structure */
int irq; /* irq issued by device */
struct list_head detected;
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 81cbbdb96aae..673a3ce67f31 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -26,6 +26,7 @@
#define __TWL_H_
#include <linux/types.h>
+#include <linux/phy/phy.h>
#include <linux/input/matrix_keypad.h>
/*
@@ -615,6 +616,7 @@ enum twl4030_usb_mode {
struct twl4030_usb_data {
enum twl4030_usb_mode usb_mode;
unsigned long features;
+ struct phy_init_data *init_data;
int (*phy_init)(struct device *dev);
int (*phy_exit)(struct device *dev);
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index a986ff588944..0f9bafa17a02 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -31,6 +31,30 @@
#define I8042_CMD_MUX_PFX 0x0090
#define I8042_CMD_MUX_SEND 0x1090
+/*
+ * Status register bits.
+ */
+
+#define I8042_STR_PARITY 0x80
+#define I8042_STR_TIMEOUT 0x40
+#define I8042_STR_AUXDATA 0x20
+#define I8042_STR_KEYLOCK 0x10
+#define I8042_STR_CMDDAT 0x08
+#define I8042_STR_MUXERR 0x04
+#define I8042_STR_IBF 0x02
+#define I8042_STR_OBF 0x01
+
+/*
+ * Control register bits.
+ */
+
+#define I8042_CTR_KBDINT 0x01
+#define I8042_CTR_AUXINT 0x02
+#define I8042_CTR_IGNKEYLOCK 0x08
+#define I8042_CTR_KBDDIS 0x10
+#define I8042_CTR_AUXDIS 0x20
+#define I8042_CTR_XLATE 0x40
+
struct serio;
#if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE)
diff --git a/include/linux/ide.h b/include/linux/ide.h
index b17974917dbf..46a14229a162 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1514,7 +1514,7 @@ static inline void ide_set_max_pio(ide_drive_t *drive)
char *ide_media_string(ide_drive_t *);
-extern struct device_attribute ide_dev_attrs[];
+extern const struct attribute_group *ide_dev_groups[];
extern struct bus_type ide_bus_type;
extern struct class *ide_port_class;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index a5b598a79bec..8c3b26a21574 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -697,6 +697,18 @@ struct ieee80211_sec_chan_offs_ie {
} __packed;
/**
+ * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
+ *
+ * This structure represents the "Mesh Channel Switch Parameters element"
+ */
+struct ieee80211_mesh_chansw_params_ie {
+ u8 mesh_ttl;
+ u8 mesh_flags;
+ __le16 mesh_reason;
+ __le16 mesh_pre_value;
+} __packed;
+
+/**
* struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE
*/
struct ieee80211_wide_bw_chansw_ie {
@@ -751,6 +763,14 @@ enum mesh_config_capab_flags {
};
/**
+ * mesh channel switch parameters element's flag indicator
+ *
+ */
+#define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0)
+#define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1)
+#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
+
+/**
* struct ieee80211_rann_ie
*
* This structure refers to "Root Announcement information element"
@@ -1391,8 +1411,8 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
-#define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX 0x00006000
-#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX 0x00030000
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MAX 0x0000e000
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX 0x00070000
#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index ddd33fd5904d..84ba5ac39e03 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -61,6 +61,7 @@ struct macvlan_dev {
struct hlist_node hlist;
struct macvlan_port *port;
struct net_device *lowerdev;
+ void *fwd_priv;
struct macvlan_pcpu_stats __percpu *pcpu_stats;
DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
@@ -118,4 +119,21 @@ extern int macvlan_link_register(struct rtnl_link_ops *ops);
extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev);
+#if IS_ENABLED(CONFIG_MACVLAN)
+static inline struct net_device *
+macvlan_dev_real_dev(const struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->lowerdev;
+}
+#else
+static inline struct net_device *
+macvlan_dev_real_dev(const struct net_device *dev)
+{
+ BUG();
+ return NULL;
+}
+#endif
+
#endif /* _LINUX_IF_MACVLAN_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 715c343f7c00..f252deb99454 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -89,6 +89,101 @@ extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
+/**
+ * struct vlan_priority_tci_mapping - vlan egress priority mappings
+ * @priority: skb priority
+ * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
+ * @next: pointer to next struct
+ */
+struct vlan_priority_tci_mapping {
+ u32 priority;
+ u16 vlan_qos;
+ struct vlan_priority_tci_mapping *next;
+};
+
+/**
+ * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
+ * @rx_packets: number of received packets
+ * @rx_bytes: number of received bytes
+ * @rx_multicast: number of received multicast packets
+ * @tx_packets: number of transmitted packets
+ * @tx_bytes: number of transmitted bytes
+ * @syncp: synchronization point for 64bit counters
+ * @rx_errors: number of rx errors
+ * @tx_dropped: number of tx drops
+ */
+struct vlan_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_multicast;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_errors;
+ u32 tx_dropped;
+};
+
+struct proc_dir_entry;
+struct netpoll;
+
+/**
+ * struct vlan_dev_priv - VLAN private device data
+ * @nr_ingress_mappings: number of ingress priority mappings
+ * @ingress_priority_map: ingress priority mappings
+ * @nr_egress_mappings: number of egress priority mappings
+ * @egress_priority_map: hash of egress priority mappings
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_id: VLAN identifier
+ * @flags: device flags
+ * @real_dev: underlying netdevice
+ * @real_dev_addr: address of underlying netdevice
+ * @dent: proc dir entry
+ * @vlan_pcpu_stats: ptr to percpu rx stats
+ */
+struct vlan_dev_priv {
+ unsigned int nr_ingress_mappings;
+ u32 ingress_priority_map[8];
+ unsigned int nr_egress_mappings;
+ struct vlan_priority_tci_mapping *egress_priority_map[16];
+
+ __be16 vlan_proto;
+ u16 vlan_id;
+ u16 flags;
+
+ struct net_device *real_dev;
+ unsigned char real_dev_addr[ETH_ALEN];
+
+ struct proc_dir_entry *dent;
+ struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *netpoll;
+#endif
+};
+
+static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+static inline u16
+vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
+{
+ struct vlan_priority_tci_mapping *mp;
+
+ smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
+
+ mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
+ while (mp) {
+ if (mp->priority == skprio) {
+ return mp->vlan_qos; /* This should already be shifted
+ * to mask correctly with the
+ * VLAN's TCI */
+ }
+ mp = mp->next;
+ }
+ return 0;
+}
+
extern bool vlan_do_receive(struct sk_buff **skb);
extern struct sk_buff *vlan_untag(struct sk_buff *skb);
@@ -121,6 +216,12 @@ static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
return 0;
}
+static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
+ u32 skprio)
+{
+ return 0;
+}
+
static inline bool vlan_do_receive(struct sk_buff **skb)
{
return false;
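
The vlan_dev_priv() and vlan_dev_get_egress_qos_mask() helpers moved into this header let drivers map an skb priority to the already-shifted VLAN QoS bits without reaching into 8021q internals (the stub above returns 0 when VLAN support is compiled out). A minimal sketch of how a driver might combine them when building a TCI value; the function name and the assumption that vlan_dev is a VLAN upper device are illustrative, not part of the patch:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* Illustrative only: derive an 802.1Q TCI from the VLAN id plus the
 * egress QoS bits selected by skb->priority. */
static u16 example_build_tci(struct net_device *vlan_dev, struct sk_buff *skb)
{
	u16 tci = vlan_dev_priv(vlan_dev)->vlan_id;

	/* vlan_qos is returned already shifted into the PCP field */
	tci |= vlan_dev_get_egress_qos_mask(vlan_dev, skb->priority);
	return tci;
}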
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 2bac0eb8948d..15607b45221a 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -11,6 +11,7 @@
#define _IIO_BUFFER_GENERIC_H_
#include <linux/sysfs.h>
#include <linux/iio/iio.h>
+#include <linux/kref.h>
#ifdef CONFIG_IIO_BUFFER
@@ -26,6 +27,8 @@ struct iio_buffer;
* @set_bytes_per_datum:set number of bytes per datum
* @get_length: get number of datums in buffer
* @set_length: set number of datums in buffer
+ * @release: called when the last reference to the buffer is dropped,
+ * should free all resources allocated by the buffer.
*
* The purpose of this structure is to make the buffer element
 * modular as, even for a given driver, different use cases may require
@@ -36,7 +39,7 @@ struct iio_buffer;
* any of them not existing.
**/
struct iio_buffer_access_funcs {
- int (*store_to)(struct iio_buffer *buffer, u8 *data);
+ int (*store_to)(struct iio_buffer *buffer, const void *data);
int (*read_first_n)(struct iio_buffer *buffer,
size_t n,
char __user *buf);
@@ -47,6 +50,8 @@ struct iio_buffer_access_funcs {
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
int (*get_length)(struct iio_buffer *buffer);
int (*set_length)(struct iio_buffer *buffer, int length);
+
+ void (*release)(struct iio_buffer *buffer);
};
/**
@@ -67,6 +72,7 @@ struct iio_buffer_access_funcs {
* @demux_list: [INTERN] list of operations required to demux the scan.
* @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
* @buffer_list: [INTERN] entry in the devices list of current buffers.
+ * @ref: [INTERN] reference count of the buffer.
*/
struct iio_buffer {
int length;
@@ -81,8 +87,9 @@ struct iio_buffer {
bool stufftoread;
const struct attribute_group *attrs;
struct list_head demux_list;
- unsigned char *demux_bounce;
+ void *demux_bounce;
struct list_head buffer_list;
+ struct kref ref;
};
/**
@@ -120,7 +127,32 @@ int iio_scan_mask_set(struct iio_dev *indio_dev,
* @indio_dev: iio_dev structure for device.
* @data: Full scan.
*/
-int iio_push_to_buffers(struct iio_dev *indio_dev, unsigned char *data);
+int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
+
+/*
+ * iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
+ * @indio_dev: iio_dev structure for device.
+ * @data: sample data
+ * @timestamp: timestamp for the sample data
+ *
+ * Pushes data to the IIO device's buffers. If timestamps are enabled for the
+ * device the function will store the supplied timestamp as the last element in
+ * the sample data buffer before pushing it to the device buffers. The sample
+ * data buffer needs to be large enough to hold the additional timestamp
+ * (usually the buffer should be indio_dev->scan_bytes bytes large).
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
+ void *data, int64_t timestamp)
+{
+ if (indio_dev->scan_timestamp) {
+ size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;
+ ((int64_t *)data)[ts_offset] = timestamp;
+ }
+
+ return iio_push_to_buffers(indio_dev, data);
+}
int iio_update_demux(struct iio_dev *indio_dev);
@@ -174,11 +206,27 @@ ssize_t iio_buffer_show_enable(struct device *dev,
iio_buffer_show_enable, \
iio_buffer_store_enable)
-int iio_sw_buffer_preenable(struct iio_dev *indio_dev);
-
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask);
+struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
+void iio_buffer_put(struct iio_buffer *buffer);
+
+/**
+ * iio_device_attach_buffer - Attach a buffer to an IIO device
+ * @indio_dev: The device the buffer should be attached to
+ * @buffer: The buffer to attach to the device
+ *
+ * This function attaches a buffer to an IIO device. The buffer stays attached to
+ * the device until the device is freed. The function should only be called at
+ * most once per device.
+ */
+static inline void iio_device_attach_buffer(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer)
+{
+ indio_dev->buffer = iio_buffer_get(buffer);
+}
+
#else /* CONFIG_IIO_BUFFER */
static inline int iio_buffer_register(struct iio_dev *indio_dev,
@@ -191,6 +239,9 @@ static inline int iio_buffer_register(struct iio_dev *indio_dev,
static inline void iio_buffer_unregister(struct iio_dev *indio_dev)
{}
+static inline void iio_buffer_get(struct iio_buffer *buffer) {}
+static inline void iio_buffer_put(struct iio_buffer *buffer) {}
+
#endif /* CONFIG_IIO_BUFFER */
#endif /* _IIO_BUFFER_GENERIC_H_ */
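
Taken together, the new reference counting and the *_with_timestamp helper change the usual driver flow: the buffer is allocated once, handed to the core with iio_device_attach_buffer() (which takes the reference released when the device is freed), and each trigger fires a push of a scan_bytes-sized block. A hedged sketch of such a trigger handler; example_state, scan_buf and example_read_samples() are hypothetical driver pieces, not part of this header:

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>

static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	/* hypothetical driver state holding a buffer of at least
	 * indio_dev->scan_bytes bytes, allocated at probe time */
	struct example_state *st = iio_priv(indio_dev);

	example_read_samples(st, st->scan_buf);
	/* the helper appends pf->timestamp as the trailing s64 element */
	iio_push_to_buffers_with_timestamp(indio_dev, st->scan_buf,
					   pf->timestamp);

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}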
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index e51f65480ea5..3c005eb3a0a4 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -16,6 +16,7 @@
#include <linux/irqreturn.h>
#include <linux/iio/trigger.h>
#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
#include <linux/platform_data/st_sensors_pdata.h>
@@ -184,6 +185,7 @@ struct st_sensors {
u8 wai;
char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
struct iio_chan_spec *ch;
+ int num_ch;
struct st_sensor_odr odr;
struct st_sensor_power pw;
struct st_sensor_axis enable_axis;
@@ -200,6 +202,8 @@ struct st_sensors {
* @trig: The trigger in use by the core driver.
* @sensor: Pointer to the current sensor struct in use.
* @current_fullscale: Maximum range of measure by the sensor.
+ * @vdd: Pointer to sensor's Vdd power supply
+ * @vdd_io: Pointer to sensor's Vdd-IO power supply
* @enabled: Status of the sensor (false->off, true->on).
* @multiread_bit: Use or not particular bit for [I2C/SPI] multiread.
* @buffer_data: Data used by buffer part.
@@ -215,6 +219,8 @@ struct st_sensor_data {
struct iio_trigger *trig;
struct st_sensors *sensor;
struct st_sensor_fullscale_avl *current_fullscale;
+ struct regulator *vdd;
+ struct regulator *vdd_io;
bool enabled;
bool multiread_bit;
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 833926c91aa8..2752b1fd12be 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -77,7 +77,7 @@ struct iio_cb_buffer;
* fail.
*/
struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
- int (*cb)(u8 *data,
+ int (*cb)(const void *data,
void *private),
void *private);
/**
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 13ce220c7003..5dab2c41031f 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -26,20 +26,6 @@ struct iio_event_data {
#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
-enum iio_event_type {
- IIO_EV_TYPE_THRESH,
- IIO_EV_TYPE_MAG,
- IIO_EV_TYPE_ROC,
- IIO_EV_TYPE_THRESH_ADAPTIVE,
- IIO_EV_TYPE_MAG_ADAPTIVE,
-};
-
-enum iio_event_direction {
- IIO_EV_DIR_EITHER,
- IIO_EV_DIR_RISING,
- IIO_EV_DIR_FALLING,
-};
-
/**
* IIO_EVENT_CODE() - create event identifier
* @chan_type: Type of the channel. Should be one of enum iio_chan_type.
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 2103cc32a5fb..256a90a1bea6 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -36,6 +36,14 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_PHASE,
IIO_CHAN_INFO_HARDWAREGAIN,
IIO_CHAN_INFO_HYSTERESIS,
+ IIO_CHAN_INFO_INT_TIME,
+};
+
+enum iio_shared_by {
+ IIO_SEPARATE,
+ IIO_SHARED_BY_TYPE,
+ IIO_SHARED_BY_DIR,
+ IIO_SHARED_BY_ALL
};
enum iio_endian {
@@ -57,7 +65,7 @@ struct iio_dev;
*/
struct iio_chan_spec_ext_info {
const char *name;
- bool shared;
+ enum iio_shared_by shared;
ssize_t (*read)(struct iio_dev *, uintptr_t private,
struct iio_chan_spec const *, char *buf);
ssize_t (*write)(struct iio_dev *, uintptr_t private,
@@ -125,12 +133,35 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
#define IIO_ENUM_AVAILABLE(_name, _e) \
{ \
.name = (_name "_available"), \
- .shared = true, \
+ .shared = IIO_SHARED_BY_TYPE, \
.read = iio_enum_available_read, \
.private = (uintptr_t)(_e), \
}
/**
+ * struct iio_event_spec - specification for a channel event
+ * @type: Type of the event
+ * @dir: Direction of the event
+ * @mask_separate: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be registered per channel.
+ * @mask_shared_by_type: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be shared by channel type.
+ * @mask_shared_by_dir: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be shared by channel type and
+ * direction.
+ * @mask_shared_by_all: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be shared by all channels.
+ */
+struct iio_event_spec {
+ enum iio_event_type type;
+ enum iio_event_direction dir;
+ unsigned long mask_separate;
+ unsigned long mask_shared_by_type;
+ unsigned long mask_shared_by_dir;
+ unsigned long mask_shared_by_all;
+};
+
+/**
* struct iio_chan_spec - specification of a single channel
* @type: What type of measurement is the channel making.
* @channel: What number do we wish to assign the channel.
@@ -146,13 +177,18 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
* shift: Shift right by this before masking out
* realbits.
* endianness: little or big endian
- * @info_mask: What information is to be exported about this channel.
- * This includes calibbias, scale etc.
* @info_mask_separate: What information is to be exported that is specific to
* this channel.
* @info_mask_shared_by_type: What information is to be exported that is shared
-* by all channels of the same type.
+ * by all channels of the same type.
+ * @info_mask_shared_by_dir: What information is to be exported that is shared
+ * by all channels of the same direction.
+ * @info_mask_shared_by_all: What information is to be exported that is shared
+ * by all channels.
* @event_mask: What events can this channel produce.
+ * @event_spec: Array of events which should be registered for this
+ * channel.
+ * @num_event_specs: Size of the event_spec array.
* @ext_info: Array of extended info attributes for this channel.
* The array is NULL terminated, the last element should
* have its name field set to NULL.
@@ -186,10 +222,13 @@ struct iio_chan_spec {
u8 shift;
enum iio_endian endianness;
} scan_type;
- long info_mask;
long info_mask_separate;
long info_mask_shared_by_type;
+ long info_mask_shared_by_dir;
+ long info_mask_shared_by_all;
long event_mask;
+ const struct iio_event_spec *event_spec;
+ unsigned int num_event_specs;
const struct iio_chan_spec_ext_info *ext_info;
const char *extend_name;
const char *datasheet_name;
@@ -212,7 +251,9 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
enum iio_chan_info_enum type)
{
return (chan->info_mask_separate & BIT(type)) |
- (chan->info_mask_shared_by_type & BIT(type));
+ (chan->info_mask_shared_by_type & BIT(type)) |
+ (chan->info_mask_shared_by_dir & BIT(type)) |
+ (chan->info_mask_shared_by_all & BIT(type));
}
#define IIO_ST(si, rb, sb, sh) \
@@ -270,6 +311,12 @@ struct iio_dev;
 * is event dependent. event_code specifies which event.
* @write_event_value: write the value associated with the event.
* Meaning is event dependent.
+ * @read_event_config_new: find out if the event is enabled. New style interface.
+ * @write_event_config_new: set if the event is enabled. New style interface.
+ * @read_event_value_new: read a configuration value associated with the event.
+ * New style interface.
+ * @write_event_value_new: write a configuration value for the event. New style
+ * interface.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
* @update_scan_mode: function to configure device and scan buffer when
@@ -310,6 +357,30 @@ struct iio_info {
int (*write_event_value)(struct iio_dev *indio_dev,
u64 event_code,
int val);
+
+ int (*read_event_config_new)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir);
+
+ int (*write_event_config_new)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state);
+
+ int (*read_event_value_new)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2);
+
+ int (*write_event_value_new)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2);
+
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
int (*update_scan_mode)(struct iio_dev *indio_dev,
@@ -457,7 +528,7 @@ static inline void iio_device_put(struct iio_dev *indio_dev)
{
if (indio_dev)
put_device(&indio_dev->dev);
-};
+}
/**
* dev_to_iio_dev() - Get IIO device struct from a device struct
@@ -593,7 +664,7 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
return indio_dev->currentmode
& (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE);
-};
+}
/**
* iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
@@ -603,12 +674,12 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
return indio_dev->debugfs_dentry;
-};
+}
#else
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
return NULL;
-};
+}
#endif
int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
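
The iio_event_spec array plus the *_new callbacks replace the old event_mask/event_code interface: each channel lists the events it can raise and which attributes are per channel versus shared. A hedged sketch of a channel description using the new fields; the voltage channel and the mask choices are made up for illustration:

#include <linux/bitops.h>
#include <linux/iio/iio.h>

static const struct iio_event_spec example_thresh_events[] = {
	{
		.type = IIO_EV_TYPE_THRESH,
		.dir = IIO_EV_DIR_RISING,
		/* enable is per channel, the threshold value is shared
		 * by all channels of the same type */
		.mask_separate = BIT(IIO_EV_INFO_ENABLE),
		.mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
	},
};

static const struct iio_chan_spec example_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
		.event_spec = example_thresh_events,
		.num_event_specs = ARRAY_SIZE(example_thresh_events),
	},
};

A driver opting in would then implement read_event_config_new()/write_event_config_new() and the matching value callbacks in its iio_info.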
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index 2958c960003a..8a1d18640ab9 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -100,6 +100,21 @@ struct iio_const_attr {
#define IIO_CONST_ATTR_SAMP_FREQ_AVAIL(_string) \
IIO_CONST_ATTR(sampling_frequency_available, _string)
+/**
+ * IIO_DEV_ATTR_INT_TIME_AVAIL - list available integration times
+ * @_show: output method for the attribute
+ **/
+#define IIO_DEV_ATTR_INT_TIME_AVAIL(_show) \
+ IIO_DEVICE_ATTR(integration_time_available, S_IRUGO, _show, NULL, 0)
+/**
+ * IIO_CONST_ATTR_INT_TIME_AVAIL - list available integration times
+ * @_string: integration time string for the attribute
+ *
+ * Constant version
+ **/
+#define IIO_CONST_ATTR_INT_TIME_AVAIL(_string) \
+ IIO_CONST_ATTR(integration_time_available, _string)
+
#define IIO_DEV_ATTR_TEMP_RAW(_show) \
IIO_DEVICE_ATTR(in_temp_raw, S_IRUGO, _show, NULL, 0)
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 88bf0f0d27b4..4ac928ee31c5 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -54,6 +54,26 @@ enum iio_modifier {
IIO_MOD_LIGHT_BLUE,
};
+enum iio_event_type {
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_TYPE_THRESH_ADAPTIVE,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+};
+
+enum iio_event_info {
+ IIO_EV_INFO_ENABLE,
+ IIO_EV_INFO_VALUE,
+ IIO_EV_INFO_HYSTERESIS,
+};
+
+enum iio_event_direction {
+ IIO_EV_DIR_EITHER,
+ IIO_EV_DIR_RISING,
+ IIO_EV_DIR_FALLING,
+};
+
#define IIO_VAL_INT 1
#define IIO_VAL_INT_PLUS_MICRO 2
#define IIO_VAL_INT_PLUS_NANO 3
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 79640e015a86..0d678aefe69d 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -147,25 +147,27 @@ struct in_ifaddr {
unsigned long ifa_tstamp; /* updated timestamp */
};
-extern int register_inetaddr_notifier(struct notifier_block *nb);
-extern int unregister_inetaddr_notifier(struct notifier_block *nb);
+int register_inetaddr_notifier(struct notifier_block *nb);
+int unregister_inetaddr_notifier(struct notifier_block *nb);
-extern void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
- struct ipv4_devconf *devconf);
+void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
+ struct ipv4_devconf *devconf);
-extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
{
return __ip_dev_find(net, addr, true);
}
-extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern void devinet_init(void);
-extern struct in_device *inetdev_by_index(struct net *, int);
-extern __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
-extern __be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local, int scope);
-extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask);
+int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
+int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+void devinet_init(void);
+struct in_device *inetdev_by_index(struct net *, int);
+__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
+__be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local,
+ int scope);
+struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
+ __be32 mask);
static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
{
@@ -218,7 +220,7 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->ip_ptr);
}
-extern void in_dev_finish_destroy(struct in_device *idev);
+void in_dev_finish_destroy(struct in_device *idev);
static inline void in_dev_put(struct in_device *idev)
{
diff --git a/include/linux/init.h b/include/linux/init.h
index e73f2b708525..8e68a64bfe00 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -26,8 +26,8 @@
* extern int initialize_foobar_device(int, int, int) __init;
*
* For initialized data:
- * You should insert __initdata between the variable name and equal
- * sign followed by value, e.g.:
+ * You should insert __initdata or __initconst between the variable name
+ * and equal sign followed by value, e.g.:
*
* static int init_variable __initdata = 0;
* static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
@@ -35,8 +35,6 @@
* Don't forget to initialize data not at file scope, i.e. within a function,
* as gcc otherwise puts the data into the bss section and not into the init
* section.
- *
- * Also note, that this data cannot be "const".
*/
/* These are for everybody (although not all archs will actually
@@ -153,6 +151,7 @@ extern unsigned int reset_devices;
void setup_arch(char **);
void prepare_namespace(void);
void __init load_default_modules(void);
+int __init init_rootfs(void);
extern void (*late_time_init)(void);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 5cd0f0949927..b0ed422e4e4a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -32,10 +32,10 @@ extern struct fs_struct init_fs;
#endif
#ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ \
- .mems_allowed_seq = SEQCNT_ZERO,
+#define INIT_CPUSET_SEQ(tsk) \
+ .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
#else
-#define INIT_CPUSET_SEQ
+#define INIT_CPUSET_SEQ(tsk)
#endif
#define INIT_SIGNALS(sig) { \
@@ -220,7 +220,7 @@ extern struct task_group root_task_group;
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
- INIT_CPUSET_SEQ \
+ INIT_CPUSET_SEQ(tsk) \
INIT_VTIME(tsk) \
}
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 78e2ada50cd5..d380c5e68008 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -55,7 +55,7 @@
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
-#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */
+#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
#define OFFSET_STRIDE (9)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5fa5afeeb759..db43b58a3355 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,14 +11,13 @@
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
-#include <linux/smp.h>
-#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
+#include <asm/irq.h>
/*
* These correspond to the IORESOURCE_IRQ_* defines in
@@ -120,7 +119,6 @@ struct irqaction {
extern irqreturn_t no_action(int cpl, void *dev_id);
-#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn,
@@ -140,40 +138,6 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
-#else
-
-extern int __must_check
-request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
- const char *name, void *dev);
-
-/*
- * Special function to avoid ifdeffery in kernel/irq/devres.c which
- * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
- * m68k). I really love these $@%#!* obvious Makefile references:
- * ../../../kernel/irq/devres.o
- */
-static inline int __must_check
-request_threaded_irq(unsigned int irq, irq_handler_t handler,
- irq_handler_t thread_fn,
- unsigned long flags, const char *name, void *dev)
-{
- return request_irq(irq, handler, flags, name, dev);
-}
-
-static inline int __must_check
-request_any_context_irq(unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *name, void *dev_id)
-{
- return request_irq(irq, handler, flags, name, dev_id);
-}
-
-static inline int __must_check
-request_percpu_irq(unsigned int irq, irq_handler_t handler,
- const char *devname, void __percpu *percpu_dev_id)
-{
- return request_irq(irq, handler, 0, devname, percpu_dev_id);
-}
-#endif
extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
@@ -221,7 +185,6 @@ extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
/* The following three functions are for the core kernel use only. */
-#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
@@ -229,13 +192,8 @@ extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
-#else
-static inline void suspend_device_irqs(void) { };
-static inline void resume_device_irqs(void) { };
-static inline int check_wakeup_irqs(void) { return 0; }
-#endif
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+#if defined(CONFIG_SMP)
extern cpumask_var_t irq_default_affinity;
@@ -287,9 +245,8 @@ static inline int irq_set_affinity_hint(unsigned int irq,
{
return -EINVAL;
}
-#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
+#endif /* CONFIG_SMP */
-#ifdef CONFIG_GENERIC_HARDIRQS
/*
* Special lockdep variants of irq disabling/enabling.
* These should be used for locking constructs that
@@ -354,33 +311,6 @@ static inline int disable_irq_wake(unsigned int irq)
return irq_set_irq_wake(irq, 0);
}
-#else /* !CONFIG_GENERIC_HARDIRQS */
-/*
- * NOTE: non-genirq architectures, if they want to support the lock
- * validator need to define the methods below in their asm/irq.h
- * files, under an #ifdef CONFIG_LOCKDEP section.
- */
-#ifndef CONFIG_LOCKDEP
-# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
-# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
- disable_irq_nosync(irq)
-# define disable_irq_lockdep(irq) disable_irq(irq)
-# define enable_irq_lockdep(irq) enable_irq(irq)
-# define enable_irq_lockdep_irqrestore(irq, flags) \
- enable_irq(irq)
-# endif
-
-static inline int enable_irq_wake(unsigned int irq)
-{
- return 0;
-}
-
-static inline int disable_irq_wake(unsigned int irq)
-{
- return 0;
-}
-#endif /* CONFIG_GENERIC_HARDIRQS */
-
#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
@@ -443,6 +373,16 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
+
+#ifdef __ARCH_HAS_DO_SOFTIRQ
+void do_softirq_own_stack(void);
+#else
+static inline void do_softirq_own_stack(void)
+{
+ __do_softirq();
+}
+#endif
+
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
@@ -450,15 +390,6 @@ extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
-/* This is the worklist that queues up per-cpu softirq work.
- *
- * send_remote_sendirq() adds work to these lists, and
- * the softirq handler itself dequeues from them. The queues
- * are protected by disabling local cpu interrupts and they must
- * only be accessed by the local cpu that they are for.
- */
-DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
-
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
static inline struct task_struct *this_cpu_ksoftirqd(void)
@@ -466,17 +397,6 @@ static inline struct task_struct *this_cpu_ksoftirqd(void)
return this_cpu_read(ksoftirqd);
}
-/* Try to send a softirq to a remote cpu. If this cannot be done, the
- * work will be queued to the local cpu.
- */
-extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
-
-/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
- * and compute the current cpu, passed in as 'this_cpu'.
- */
-extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
- int this_cpu, int softirq);
-
/* Tasklets --- multithreaded analogue of BHs.
Main feature differing them of generic softirqs: tasklet
@@ -655,7 +575,7 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
* if more than one irq occurred.
*/
-#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
+#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
return 0;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3aeb7305e2f5..a444c790fa72 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -22,6 +22,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>
+#include <trace/events/iommu.h>
#define IOMMU_READ (1)
#define IOMMU_WRITE (2)
@@ -58,10 +59,26 @@ struct iommu_domain {
#define IOMMU_CAP_CACHE_COHERENCY 0x1
#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */
+/*
+ * The following constraints are specific to FSL_PAMUV1:
+ * -aperture must be power of 2, and naturally aligned
+ * -number of windows must be power of 2, and address space size
+ * of each window is determined by aperture size / # of windows
+ * -the actual size of the mapped region of a window must be power
+ * of 2 starting with 4KB and physical address must be naturally
+ * aligned.
+ * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above-mentioned constraints.
+ * The caller can invoke iommu_domain_get_attr to check if the underlying
+ * iommu implementation supports these constraints.
+ */
+
enum iommu_attr {
DOMAIN_ATTR_GEOMETRY,
DOMAIN_ATTR_PAGING,
DOMAIN_ATTR_WINDOWS,
+ DOMAIN_ATTR_FSL_PAMU_STASH,
+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
+ DOMAIN_ATTR_FSL_PAMUV1,
DOMAIN_ATTR_MAX,
};
@@ -211,6 +228,7 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
ret = domain->handler(domain, dev, iova, flags,
domain->handler_token);
+ trace_io_page_fault(dev, iova, flags);
return ret;
}
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index c4d870b0d5e6..f6c82de12541 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -22,7 +22,7 @@ struct ipc_ids {
int in_use;
unsigned short seq;
unsigned short seq_max;
- struct rw_semaphore rw_mutex;
+ struct rw_semaphore rwsem;
struct idr ipcs_idr;
int next_id;
};
@@ -34,9 +34,9 @@ struct ipc_namespace {
int sem_ctls[4];
int used_sems;
- int msg_ctlmax;
- int msg_ctlmnb;
- int msg_ctlmni;
+ unsigned int msg_ctlmax;
+ unsigned int msg_ctlmnb;
+ unsigned int msg_ctlmni;
atomic_t msg_bytes;
atomic_t msg_hdrs;
int auto_msgmni;
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 28ea38439313..c56c350324e4 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -4,6 +4,7 @@
#include <uapi/linux/ipv6.h>
#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
+#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
/*
* This structure contains configuration options per IPv6 link.
*/
@@ -21,13 +22,11 @@ struct ipv6_devconf {
__s32 force_mld_version;
__s32 mldv1_unsolicited_report_interval;
__s32 mldv2_unsolicited_report_interval;
-#ifdef CONFIG_IPV6_PRIVACY
__s32 use_tempaddr;
__s32 temp_valid_lft;
__s32 temp_prefered_lft;
__s32 regen_max_retry;
__s32 max_desync_factor;
-#endif
__s32 max_addresses;
__s32 accept_ra_defrtr;
__s32 accept_ra_pinfo;
@@ -115,16 +114,8 @@ static inline int inet6_iif(const struct sk_buff *skb)
return IP6CB(skb)->iif;
}
-struct inet6_request_sock {
- struct in6_addr loc_addr;
- struct in6_addr rmt_addr;
- struct sk_buff *pktopts;
- int iif;
-};
-
struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
- struct inet6_request_sock tcp6rsk_inet6;
};
struct ipv6_mc_socklist;
@@ -141,8 +132,6 @@ struct ipv6_fl_socklist;
*/
struct ipv6_pinfo {
struct in6_addr saddr;
- struct in6_addr rcv_saddr;
- struct in6_addr daddr;
struct in6_pktinfo sticky_pktinfo;
const struct in6_addr *daddr_cache;
#ifdef CONFIG_IPV6_SUBTREES
@@ -256,48 +245,22 @@ struct tcp6_sock {
extern int inet6_sk_rebuild_header(struct sock *sk);
-struct inet6_timewait_sock {
- struct in6_addr tw_v6_daddr;
- struct in6_addr tw_v6_rcv_saddr;
-};
-
struct tcp6_timewait_sock {
struct tcp_timewait_sock tcp6tw_tcp;
- struct inet6_timewait_sock tcp6tw_inet6;
};
-static inline struct inet6_timewait_sock *inet6_twsk(const struct sock *sk)
-{
- return (struct inet6_timewait_sock *)(((u8 *)sk) +
- inet_twsk(sk)->tw_ipv6_offset);
-}
-
#if IS_ENABLED(CONFIG_IPV6)
static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
{
return inet_sk(__sk)->pinet6;
}
-static inline struct inet6_request_sock *
- inet6_rsk(const struct request_sock *rsk)
-{
- return (struct inet6_request_sock *)(((u8 *)rsk) +
- inet_rsk(rsk)->inet6_rsk_offset);
-}
-
-static inline u32 inet6_rsk_offset(struct request_sock *rsk)
-{
- return rsk->rsk_ops->obj_size - sizeof(struct inet6_request_sock);
-}
-
static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops)
{
struct request_sock *req = reqsk_alloc(ops);
- if (req != NULL) {
- inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req);
- inet6_rsk(req)->pktopts = NULL;
- }
+ if (req)
+ inet_rsk(req)->pktopts = NULL;
return req;
}
@@ -321,21 +284,11 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only)
#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
-static inline u16 inet6_tw_offset(const struct proto *prot)
+static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
{
- return prot->twsk_prot->twsk_obj_size -
- sizeof(struct inet6_timewait_sock);
-}
-
-static inline struct in6_addr *__inet6_rcv_saddr(const struct sock *sk)
-{
- return likely(sk->sk_state != TCP_TIME_WAIT) ?
- &inet6_sk(sk)->rcv_saddr : &inet6_twsk(sk)->tw_v6_rcv_saddr;
-}
-
-static inline struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
-{
- return sk->sk_family == AF_INET6 ? __inet6_rcv_saddr(sk) : NULL;
+ if (sk->sk_family == AF_INET6)
+ return &sk->sk_v6_rcv_saddr;
+ return NULL;
}
static inline int inet_v6_ipv6only(const struct sock *sk)
@@ -363,28 +316,18 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
return NULL;
}
-#define __inet6_rcv_saddr(__sk) NULL
#define inet6_rcv_saddr(__sk) NULL
#define tcp_twsk_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
#endif /* IS_ENABLED(CONFIG_IPV6) */
#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
- ((inet_sk(__sk)->inet_portpair == (__ports)) && \
+ (((__sk)->sk_portpair == (__ports)) && \
((__sk)->sk_family == AF_INET6) && \
- ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
- ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
+ ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
+ ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
(!(__sk)->sk_bound_dev_if || \
((__sk)->sk_bound_dev_if == (__dif))) && \
net_eq(sock_net(__sk), (__net)))
-#define INET6_TW_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
- ((inet_twsk(__sk)->tw_portpair == (__ports)) && \
- ((__sk)->sk_family == AF_INET6) && \
- ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr)) && \
- ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_rcv_saddr, (__daddr)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif))) && \
- net_eq(sock_net(__sk), (__net)))
-
#endif /* _IPV6_H */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f04d3ba335cb..7dc10036eff5 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -70,6 +70,9 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
* IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
 * IRQ_NESTED_THREAD - Interrupt nests into another thread
* IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
+ * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
+ * it from the spurious interrupt detection
+ * mechanism and from core side polling.
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -94,12 +97,14 @@ enum {
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
IRQ_PER_CPU_DEVID = (1 << 17),
+ IRQ_IS_POLLED = (1 << 18),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
+ IRQ_IS_POLLED)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -382,8 +387,6 @@ extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
-#ifdef CONFIG_GENERIC_HARDIRQS
-
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
void irq_move_masked_irq(struct irq_data *data);
@@ -802,11 +805,4 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
-#else /* !CONFIG_GENERIC_HARDIRQS */
-
-extern struct msi_desc *irq_get_msi_desc(unsigned int irq);
-extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
-
-#endif /* CONFIG_GENERIC_HARDIRQS */
-
#endif /* _LINUX_IRQ_H */
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 3e203eb23cc7..cac496b1e279 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -31,6 +31,8 @@
#define GIC_DIST_TARGET 0x800
#define GIC_DIST_CONFIG 0xc00
#define GIC_DIST_SOFTINT 0xf00
+#define GIC_DIST_SGI_PENDING_CLEAR 0xf10
+#define GIC_DIST_SGI_PENDING_SET 0xf20
#define GICH_HCR 0x0
#define GICH_VTR 0x4
@@ -66,6 +68,7 @@ extern struct irq_chip gic_arch_extn;
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset, struct device_node *);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
+void gic_cpu_if_down(void);
static inline void gic_init(unsigned int nr, int start,
void __iomem *dist , void __iomem *cpu)
@@ -73,6 +76,11 @@ static inline void gic_init(unsigned int nr, int start,
gic_init_bases(nr, start, dist, cpu, 0, NULL);
}
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
+int gic_get_cpu_id(unsigned int cpu);
+void gic_migrate_target(unsigned int new_cpu_id);
+unsigned long gic_get_sgir_physaddr(void);
+
#endif /* __ASSEMBLY */
#endif
diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h
new file mode 100644
index 000000000000..c78a8921185d
--- /dev/null
+++ b/include/linux/irqchip/mmp.h
@@ -0,0 +1,6 @@
+#ifndef __IRQCHIP_MMP_H
+#define __IRQCHIP_MMP_H
+
+extern struct irq_chip icu_irq_chip;
+
+#endif /* __IRQCHIP_MMP_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 623325e2ff97..56fb646909dc 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -76,8 +76,6 @@ struct irq_desc {
extern struct irq_desc irq_desc[NR_IRQS];
#endif
-#ifdef CONFIG_GENERIC_HARDIRQS
-
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
return &desc->irq_data;
@@ -173,6 +171,5 @@ __irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
desc->preflow_handler = handler;
}
#endif
-#endif
#endif
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 0a2dc46cdaf6..fdd5cc16c9c4 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -4,23 +4,6 @@
#include <uapi/linux/irqnr.h>
-#ifndef CONFIG_GENERIC_HARDIRQS
-#include <asm/irq.h>
-
-/*
- * Wrappers for non-genirq architectures:
- */
-#define nr_irqs NR_IRQS
-#define irq_to_desc(irq) (&irq_desc[irq])
-
-# define for_each_irq_desc(irq, desc) \
- for (irq = 0; irq < nr_irqs; irq++)
-
-# define for_each_irq_desc_reverse(irq, desc) \
- for (irq = nr_irqs - 1; irq >= 0; irq--)
-
-#else /* CONFIG_GENERIC_HARDIRQS */
-
extern int nr_irqs;
extern struct irq_desc *irq_to_desc(unsigned int irq);
unsigned int irq_get_next_irq(unsigned int offset);
@@ -50,8 +33,6 @@ unsigned int irq_get_next_irq(unsigned int offset);
for (irq = irq_get_next_irq(0); irq < nr_irqs; \
irq = irq_get_next_irq(irq + 1))
-#endif /* CONFIG_GENERIC_HARDIRQS */
-
#define for_each_irq_nr(irq) \
for (irq = 0; irq < nr_irqs; irq++)
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index 714ba08dc092..e374e369fb2f 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -14,6 +14,6 @@ enum irqreturn {
};
typedef enum irqreturn irqreturn_t;
-#define IRQ_RETVAL(x) ((x) != IRQ_NONE)
+#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)
#endif
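
The rewritten IRQ_RETVAL() now yields the proper irqreturn_t values rather than a bare 0/1, so the common handler pattern stays type-correct. A minimal sketch; example_device_serviced() and the meaning of dev_id are hypothetical:

#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	bool handled = example_device_serviced(dev_id);	/* hypothetical */

	/* IRQ_HANDLED when the device raised the interrupt, IRQ_NONE otherwise */
	return IRQ_RETVAL(handled);
}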
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index a5079072da66..39999775b922 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -48,6 +48,13 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/bug.h>
+
+extern bool static_key_initialized;
+
+#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \
+ "%s used before call to jump_label_init", \
+ __func__)
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -128,29 +135,32 @@ struct static_key {
static __always_inline void jump_label_init(void)
{
+ static_key_initialized = true;
}
static __always_inline bool static_key_false(struct static_key *key)
{
- if (unlikely(atomic_read(&key->enabled)) > 0)
+ if (unlikely(atomic_read(&key->enabled) > 0))
return true;
return false;
}
static __always_inline bool static_key_true(struct static_key *key)
{
- if (likely(atomic_read(&key->enabled)) > 0)
+ if (likely(atomic_read(&key->enabled) > 0))
return true;
return false;
}
static inline void static_key_slow_inc(struct static_key *key)
{
+ STATIC_KEY_CHECK_USE();
atomic_inc(&key->enabled);
}
static inline void static_key_slow_dec(struct static_key *key)
{
+ STATIC_KEY_CHECK_USE();
atomic_dec(&key->enabled);
}
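
With STATIC_KEY_CHECK_USE() in place, incrementing a key before jump_label_init() now warns, but the basic usage pattern is unchanged. A hedged sketch, assuming a feature toggled at runtime; the key name and the rare-work function are illustrative:

#include <linux/jump_label.h>

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

static void example_fast_path(void)
{
	/* patched to a no-op branch until the key is enabled */
	if (static_key_false(&example_key))
		example_do_rare_work();		/* hypothetical slow path */
}

static void example_enable_feature(void)
{
	static_key_slow_inc(&example_key);	/* must run after jump_label_init() */
}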
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index 113788389b3d..089f70f83e97 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -23,12 +23,14 @@ struct static_key_deferred {
};
static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
+ STATIC_KEY_CHECK_USE();
static_key_slow_dec(&key->key);
}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
+ STATIC_KEY_CHECK_USE();
}
#endif /* HAVE_JUMP_LABEL */
#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 7f6fe6e015bc..290db1269c4c 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -109,6 +109,7 @@ typedef enum {
KDB_REASON_RECURSE, /* Recursive entry to kdb;
* regs probably valid */
KDB_REASON_SSTEP, /* Single Step trap. - regs valid */
+ KDB_REASON_SYSTEM_NMI, /* In NMI due to SYSTEM cmd; regs valid */
} kdb_reason_t;
extern int kdb_trap_printk;
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index 546eb6a76934..f65ce09784f1 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -15,5 +15,6 @@
#define KPF_OWNER_PRIVATE 37
#define KPF_ARCH 38
#define KPF_UNCACHED 39
+#define KPF_SOFTDIRTY 40
#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 482ad2d84a32..ecb87544cc5d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -193,7 +193,8 @@ extern int _cond_resched(void);
(__x < 0) ? -__x : __x; \
})
-#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
+#if defined(CONFIG_MMU) && \
+ (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
void might_fault(void);
#else
static inline void might_fault(void) { }
@@ -439,6 +440,17 @@ static inline char *hex_byte_pack(char *buf, u8 byte)
return buf;
}
+extern const char hex_asc_upper[];
+#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
+#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack_upper(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_upper_hi(byte);
+ *buf++ = hex_asc_upper_lo(byte);
+ return buf;
+}
+
static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
{
return hex_byte_pack(buf, byte);
@@ -490,7 +502,6 @@ void tracing_snapshot_alloc(void);
extern void tracing_start(void);
extern void tracing_stop(void);
-extern void ftrace_off_permanent(void);
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
@@ -628,7 +639,6 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
#else
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
-static inline void ftrace_off_permanent(void) { }
static inline void trace_dump_stack(int skip) { }
static inline void tracing_on(void) { }
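
hex_byte_pack_upper() mirrors hex_byte_pack() but emits 'A'-'F'. A small illustrative helper built on it; the function name and the caller-provided sizing rule are assumptions:

#include <linux/kernel.h>

/* dst must have room for 2 * len + 1 bytes */
static void example_to_upper_hex(char *dst, const u8 *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst = hex_byte_pack_upper(dst, src[i]);
	*dst = '\0';
}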
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index ed5f6ed6eb77..51c72be4a7c3 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -36,9 +36,6 @@ struct kernel_cpustat {
};
struct kernel_stat {
-#ifndef CONFIG_GENERIC_HARDIRQS
- unsigned int irqs[NR_IRQS];
-#endif
unsigned long irqs_sum;
unsigned int softirqs[NR_SOFTIRQS];
};
@@ -54,22 +51,6 @@ DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
extern unsigned long long nr_context_switches(void);
-#ifndef CONFIG_GENERIC_HARDIRQS
-
-struct irq_desc;
-
-static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
- struct irq_desc *desc)
-{
- __this_cpu_inc(kstat.irqs[irq]);
- __this_cpu_inc(kstat.irqs_sum);
-}
-
-static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
-{
- return kstat_cpu(cpu).irqs[irq];
-}
-#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
@@ -79,8 +60,6 @@ do { \
__this_cpu_inc(kstat.irqs_sum); \
} while (0)
-#endif
-
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
__this_cpu_inc(kstat.softirqs[irq]);
@@ -94,20 +73,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
/*
* Number of interrupts per specific IRQ source, since bootup
*/
-#ifndef CONFIG_GENERIC_HARDIRQS
-static inline unsigned int kstat_irqs(unsigned int irq)
-{
- unsigned int sum = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- sum += kstat_irqs_cpu(irq, cpu);
-
- return sum;
-}
-#else
extern unsigned int kstat_irqs(unsigned int irq);
-#endif
/*
* Number of interrupts per cpu, since bootup
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d78d28a733b1..5fd33dc1fe3a 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
extern size_t vmcoreinfo_size;
extern size_t vmcoreinfo_max_size;
+/* flag to track if kexec reboot is in progress */
+extern bool kexec_in_progress;
+
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base);
int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 518a53afb9ea..a74c3a84dfdd 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -45,6 +45,7 @@ struct key_preparsed_payload {
const void *data; /* Raw data */
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
+ bool trusted; /* True if key is trusted */
};
typedef int (*request_key_actor_t)(struct key_construction *key,
@@ -63,6 +64,11 @@ struct key_type {
*/
size_t def_datalen;
+ /* Default key search algorithm. */
+ unsigned def_lookup_type;
+#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
+#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
+
/* vet a description */
int (*vet_description)(const char *description);
diff --git a/include/linux/key.h b/include/linux/key.h
index 4dfde1161c5e..80d677483e31 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -22,6 +22,7 @@
#include <linux/sysctl.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
+#include <linux/assoc_array.h>
#ifdef __KERNEL__
#include <linux/uidgid.h>
@@ -82,6 +83,12 @@ struct key_owner;
struct keyring_list;
struct keyring_name;
+struct keyring_index_key {
+ struct key_type *type;
+ const char *description;
+ size_t desc_len;
+};
+
/*****************************************************************************/
/*
* key reference with possession attribute handling
@@ -99,7 +106,7 @@ struct keyring_name;
typedef struct __key_reference_with_attributes *key_ref_t;
static inline key_ref_t make_key_ref(const struct key *key,
- unsigned long possession)
+ bool possession)
{
return (key_ref_t) ((unsigned long) key | possession);
}
@@ -109,7 +116,7 @@ static inline struct key *key_ref_to_ptr(const key_ref_t key_ref)
return (struct key *) ((unsigned long) key_ref & ~1UL);
}
-static inline unsigned long is_key_possessed(const key_ref_t key_ref)
+static inline bool is_key_possessed(const key_ref_t key_ref)
{
return (unsigned long) key_ref & 1UL;
}
@@ -129,7 +136,6 @@ struct key {
struct list_head graveyard_link;
struct rb_node serial_node;
};
- struct key_type *type; /* type of key */
struct rw_semaphore sem; /* change vs change sem */
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
@@ -162,13 +168,21 @@ struct key {
#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
+#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */
+#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */
- /* the description string
- * - this is used to match a key against search criteria
- * - this should be a printable string
+ /* the key type and key description string
+ * - the desc is used to match a key against search criteria
+ * - it should be a printable string
* - eg: for krb5 AFS, this might be "afs@REDHAT.COM"
*/
- char *description;
+ union {
+ struct keyring_index_key index_key;
+ struct {
+ struct key_type *type; /* type of key */
+ char *description;
+ };
+ };
/* type specific data
* - this is used by the keyring type to index the name
@@ -185,11 +199,14 @@ struct key {
* whatever
*/
union {
- unsigned long value;
- void __rcu *rcudata;
- void *data;
- struct keyring_list __rcu *subscriptions;
- } payload;
+ union {
+ unsigned long value;
+ void __rcu *rcudata;
+ void *data;
+ void *data2[2];
+ } payload;
+ struct assoc_array keys;
+ };
};
extern struct key *key_alloc(struct key_type *type,
@@ -203,18 +220,23 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
+#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
extern void key_put(struct key *key);
-static inline struct key *key_get(struct key *key)
+static inline struct key *__key_get(struct key *key)
{
- if (key)
- atomic_inc(&key->usage);
+ atomic_inc(&key->usage);
return key;
}
+static inline struct key *key_get(struct key *key)
+{
+ return key ? __key_get(key) : key;
+}
+
static inline void key_ref_put(key_ref_t key_ref)
{
key_put(key_ref_to_ptr(key_ref));
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 10308c6a3d1c..552d51efb429 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -1,7 +1,7 @@
/*
* A generic kernel FIFO implementation
*
- * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
+ * Copyright (C) 2013 Stefani Seibold <stefani@seibold.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -67,9 +67,10 @@ struct __kfifo {
union { \
struct __kfifo kfifo; \
datatype *type; \
+ const datatype *const_type; \
char (*rectype)[recsize]; \
ptrtype *ptr; \
- const ptrtype *ptr_const; \
+ ptrtype const *ptr_const; \
}
#define __STRUCT_KFIFO(type, size, recsize, ptrtype) \
@@ -386,16 +387,12 @@ __kfifo_int_must_check_helper( \
#define kfifo_put(fifo, val) \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
- typeof((val) + 1) __val = (val); \
+ typeof(*__tmp->const_type) __val = (val); \
unsigned int __ret; \
- const size_t __recsize = sizeof(*__tmp->rectype); \
+ size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
- if (0) { \
- typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \
- __dummy = (typeof(__val))NULL; \
- } \
if (__recsize) \
- __ret = __kfifo_in_r(__kfifo, __val, sizeof(*__val), \
+ __ret = __kfifo_in_r(__kfifo, &__val, sizeof(__val), \
__recsize); \
else { \
__ret = !kfifo_is_full(__tmp); \
@@ -404,7 +401,7 @@ __kfifo_int_must_check_helper( \
((typeof(__tmp->type))__kfifo->data) : \
(__tmp->buf) \
)[__kfifo->in & __tmp->kfifo.mask] = \
- *(typeof(__tmp->type))__val; \
+ (typeof(*__tmp->type))__val; \
smp_wmb(); \
__kfifo->in++; \
} \
@@ -415,7 +412,7 @@ __kfifo_int_must_check_helper( \
/**
* kfifo_get - get data from the fifo
* @fifo: address of the fifo to be used
- * @val: the var where to store the data to be added
+ * @val: address where to store the data
*
* This macro reads the data from the fifo.
* It returns 0 if the fifo was empty. Otherwise it returns the number
@@ -428,12 +425,10 @@ __kfifo_int_must_check_helper( \
__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
- typeof((val) + 1) __val = (val); \
+ typeof(__tmp->ptr) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
- if (0) \
- __val = (typeof(__tmp->ptr))0; \
if (__recsize) \
__ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \
__recsize); \
@@ -456,7 +451,7 @@ __kfifo_uint_must_check_helper( \
/**
* kfifo_peek - get data from the fifo without removing
* @fifo: address of the fifo to be used
- * @val: the var where to store the data to be added
+ * @val: address where to store the data
*
* This reads the data from the fifo without removing it from the fifo.
* It returns 0 if the fifo was empty. Otherwise it returns the number
@@ -469,12 +464,10 @@ __kfifo_uint_must_check_helper( \
__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
- typeof((val) + 1) __val = (val); \
+ typeof(__tmp->ptr) __val = (val); \
unsigned int __ret; \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
- if (0) \
- __val = (typeof(__tmp->ptr))NULL; \
if (__recsize) \
__ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \
__recsize); \
@@ -508,14 +501,10 @@ __kfifo_uint_must_check_helper( \
#define kfifo_in(fifo, buf, n) \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
- typeof((buf) + 1) __buf = (buf); \
+ typeof(__tmp->ptr_const) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
- if (0) { \
- typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \
- __dummy = (typeof(__buf))NULL; \
- } \
(__recsize) ?\
__kfifo_in_r(__kfifo, __buf, __n, __recsize) : \
__kfifo_in(__kfifo, __buf, __n); \
@@ -561,14 +550,10 @@ __kfifo_uint_must_check_helper( \
__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
- typeof((buf) + 1) __buf = (buf); \
+ typeof(__tmp->ptr) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
- if (0) { \
- typeof(__tmp->ptr) __dummy = NULL; \
- __buf = __dummy; \
- } \
(__recsize) ?\
__kfifo_out_r(__kfifo, __buf, __n, __recsize) : \
__kfifo_out(__kfifo, __buf, __n); \
@@ -773,14 +758,10 @@ __kfifo_uint_must_check_helper( \
__kfifo_uint_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
- typeof((buf) + 1) __buf = (buf); \
+ typeof(__tmp->ptr) __buf = (buf); \
unsigned long __n = (n); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
- if (0) { \
- typeof(__tmp->ptr) __dummy __attribute__ ((unused)) = NULL; \
- __buf = __dummy; \
- } \
(__recsize) ? \
__kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \
__kfifo_out_peek(__kfifo, __buf, __n); \
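A minimal sketch of the reworked argument checking, assuming a hypothetical driver-local fifo: kfifo_put() now takes the value itself (type-checked against the fifo's element type via const_type), while kfifo_get() still takes the address to copy into.

#include <linux/kfifo.h>
#include <linux/printk.h>

static DEFINE_KFIFO(example_fifo, int, 16);	/* hypothetical 16-entry fifo */

static void example_kfifo_roundtrip(void)
{
	int out;

	/* The value is passed directly; a mismatched type now fails to compile. */
	if (!kfifo_put(&example_fifo, 42))
		return;				/* fifo was full */

	/* Retrieval still takes the address where the element is stored. */
	if (kfifo_get(&example_fifo, &out))
		pr_info("example_fifo yielded %d\n", out);
}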
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index c6e091bf39a5..dfb4f2ffdaa2 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -310,6 +310,7 @@ extern int
kgdb_handle_exception(int ex_vector, int signo, int err_code,
struct pt_regs *regs);
extern int kgdb_nmicallback(int cpu, void *regs);
+extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, atomic_t *snd_rdy);
extern void gdbstub_exit(int status);
extern int kgdb_single_step;
diff --git a/include/linux/kobj_completion.h b/include/linux/kobj_completion.h
new file mode 100644
index 000000000000..a428f6436063
--- /dev/null
+++ b/include/linux/kobj_completion.h
@@ -0,0 +1,18 @@
+#ifndef _KOBJ_COMPLETION_H_
+#define _KOBJ_COMPLETION_H_
+
+#include <linux/kobject.h>
+#include <linux/completion.h>
+
+struct kobj_completion {
+ struct kobject kc_kobj;
+ struct completion kc_unregister;
+};
+
+#define kobj_to_kobj_completion(kobj) \
+ container_of(kobj, struct kobj_completion, kc_kobj)
+
+void kobj_completion_init(struct kobj_completion *kc, struct kobj_type *ktype);
+void kobj_completion_release(struct kobject *kobj);
+void kobj_completion_del_and_wait(struct kobj_completion *kc);
+#endif /* _KOBJ_COMPLETION_H_ */
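A hedged sketch of how the new helper is presumably meant to be embedded; the containing structure and ktype below are hypothetical.

#include <linux/kobj_completion.h>

struct example_obj {
	struct kobj_completion kc;	/* kobject plus unregister completion */
};

static struct kobj_type example_ktype = {
	.release = kobj_completion_release,	/* completes kc_unregister */
};

static void example_setup(struct example_obj *obj)
{
	kobj_completion_init(&obj->kc, &example_ktype);
}

static void example_teardown(struct example_obj *obj)
{
	/* Deletes the kobject, drops the reference and waits for release. */
	kobj_completion_del_and_wait(&obj->kc);
}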
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index de6dcbcc6ef7..e7ba650086ce 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -107,6 +107,7 @@ extern int __must_check kobject_move(struct kobject *, struct kobject *);
extern struct kobject *kobject_get(struct kobject *kobj);
extern void kobject_put(struct kobject *kobj);
+extern const void *kobject_namespace(struct kobject *kobj);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
struct kobj_type {
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index f66b065a8b5f..df32d2508290 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -39,6 +39,7 @@ enum kobj_ns_type {
*/
struct kobj_ns_type_operations {
enum kobj_ns_type type;
+ bool (*current_may_mount)(void);
void *(*grab_current_ns)(void);
const void *(*netlink_ns)(struct sock *sk);
const void *(*initial_ns)(void);
@@ -50,6 +51,7 @@ int kobj_ns_type_registered(enum kobj_ns_type type);
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+bool kobj_ns_current_may_mount(enum kobj_ns_type type);
void *kobj_ns_grab_current(enum kobj_ns_type type);
const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
const void *kobj_ns_initial(enum kobj_ns_type type);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index ca1d27a0d6a6..925eaf28fca9 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -264,10 +264,36 @@ extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
-extern kprobe_opcode_t *get_insn_slot(void);
-extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
+struct kprobe_insn_cache {
+ struct mutex mutex;
+ void *(*alloc)(void); /* allocate insn page */
+ void (*free)(void *); /* free insn page */
+ struct list_head pages; /* list of kprobe_insn_page */
+ size_t insn_size; /* size of instruction slot */
+ int nr_garbage;
+};
+
+extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
+extern void __free_insn_slot(struct kprobe_insn_cache *c,
+ kprobe_opcode_t *slot, int dirty);
+
+#define DEFINE_INSN_CACHE_OPS(__name) \
+extern struct kprobe_insn_cache kprobe_##__name##_slots; \
+ \
+static inline kprobe_opcode_t *get_##__name##_slot(void) \
+{ \
+ return __get_insn_slot(&kprobe_##__name##_slots); \
+} \
+ \
+static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
+{ \
+ __free_insn_slot(&kprobe_##__name##_slots, slot, dirty); \
+} \
+
+DEFINE_INSN_CACHE_OPS(insn);
+
#ifdef CONFIG_OPTPROBES
/*
* Internal structure for direct jump optimized probe
@@ -287,13 +313,13 @@ extern void arch_optimize_kprobes(struct list_head *oplist);
extern void arch_unoptimize_kprobes(struct list_head *oplist,
struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
-extern kprobe_opcode_t *get_optinsn_slot(void);
-extern void free_optinsn_slot(kprobe_opcode_t *slot, int dirty);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
unsigned long addr);
extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
+DEFINE_INSN_CACHE_OPS(optinsn);
+
#ifdef CONFIG_SYSCTL
extern int sysctl_kprobes_optimization;
extern int proc_kprobes_optimization_handler(struct ctl_table *table,
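To make the macro concrete: DEFINE_INSN_CACHE_OPS(insn) regenerates the old get_insn_slot()/free_insn_slot() names on top of __get_insn_slot()/__free_insn_slot(), so existing callers such as the hypothetical arch helpers below keep working unchanged.

/* Hypothetical arch-side helpers using the generated wrappers. */
static kprobe_opcode_t *example_alloc_probe_slot(void)
{
	/* Expands to __get_insn_slot(&kprobe_insn_slots). */
	return get_insn_slot();
}

static void example_release_probe_slot(kprobe_opcode_t *slot)
{
	/* dirty == 0: the slot may be reused immediately. */
	free_insn_slot(slot, 0);
}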
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ca645a01d37a..9523d2ad7535 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -142,7 +142,7 @@ struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
-extern raw_spinlock_t kvm_lock;
+extern spinlock_t kvm_lock;
extern struct list_head vm_list;
struct kvm_io_range {
@@ -189,8 +189,7 @@ struct kvm_async_pf {
gva_t gva;
unsigned long addr;
struct kvm_arch_async_pf arch;
- struct page *page;
- bool done;
+ bool wakeup_all;
};
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
@@ -508,9 +507,10 @@ int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
-void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
-int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
@@ -533,6 +533,7 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
@@ -670,6 +671,25 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
}
#endif
+#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
+void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
+void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
+bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
+#else
+static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
+{
+ return false;
+}
+#endif
+
static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
@@ -746,9 +766,6 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
-/* For vcpu->arch.iommu_flags */
-#define KVM_IOMMU_CACHE_COHERENCY 0x1
-
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
@@ -788,7 +805,7 @@ static inline void kvm_guest_enter(void)
/* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
- * is very similar to exiting to userspase from rcu point of view. In
+ * is very similar to exiting to userspace from rcu point of view. In
* addition CPU may stay in a guest mode for quite a long time (up to
* one time slice). Lets treat guest mode as quiescent state, just like
* we do with user-mode execution.
@@ -841,13 +858,6 @@ static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
return gfn_to_memslot(kvm, gfn)->id;
}
-static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
-{
- /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
- return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
- (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-}
-
static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
@@ -1065,6 +1075,7 @@ struct kvm_device *kvm_device_from_filp(struct file *filp);
extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
+extern struct kvm_device_ops kvm_vfio_ops;
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
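A hedged sketch of the intended pairing for the new non-coherent DMA hooks, as a device-assignment path might call them; the functions below are hypothetical.

static void example_assign_noncoherent_device(struct kvm *kvm)
{
	/* Tell the arch code a device doing non-coherent DMA is attached. */
	kvm_arch_register_noncoherent_dma(kvm);
}

static void example_deassign_noncoherent_device(struct kvm *kvm)
{
	kvm_arch_unregister_noncoherent_dma(kvm);
}

static bool example_guest_sees_noncoherent_dma(struct kvm *kvm)
{
	/* Architectures without the hook always report false. */
	return kvm_arch_has_noncoherent_dma(kvm);
}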
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 0d24e932db0b..96549abe8842 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -25,16 +25,6 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
-/* can make br locks by using local lock for read side, global lock for write */
-#define br_lock_init(name) lg_lock_init(name, #name)
-#define br_read_lock(name) lg_local_lock(name)
-#define br_read_unlock(name) lg_local_unlock(name)
-#define br_write_lock(name) lg_global_lock(name)
-#define br_write_unlock(name) lg_global_unlock(name)
-
-#define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name)
-#define DEFINE_STATIC_BRLOCK(name) DEFINE_STATIC_LGLOCK(name)
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define LOCKDEP_INIT_MAP lockdep_init_map
#else
diff --git a/include/linux/list.h b/include/linux/list.h
index f4d8a2f12a33..ef9594171062 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -362,6 +362,17 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((ptr)->next, type, member)
/**
+ * list_last_entry - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Note that the list is expected to be non-empty.
REPLACED_BY_NEXT_EDIT
+ */
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+/**
* list_first_entry_or_null - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
@@ -373,6 +384,22 @@ static inline void list_splice_tail_init(struct list_head *list,
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
/**
+ * list_next_entry - get the next element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_next_entry(pos, member) \
+ list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * list_prev_entry - get the prev element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_prev_entry(pos, member) \
+ list_entry((pos)->member.prev, typeof(*(pos)), member)
+
+/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
@@ -416,9 +443,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry(pos, head, member) \
- for (pos = list_entry((head)->next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member))
+ for (pos = list_first_entry(head, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_next_entry(pos, member))
/**
* list_for_each_entry_reverse - iterate backwards over list of given type.
@@ -427,9 +454,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
- for (pos = list_entry((head)->prev, typeof(*pos), member); \
- &pos->member != (head); \
- pos = list_entry(pos->member.prev, typeof(*pos), member))
+ for (pos = list_last_entry(head, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_prev_entry(pos, member))
/**
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
@@ -452,9 +479,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* the current position.
*/
#define list_for_each_entry_continue(pos, head, member) \
- for (pos = list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member))
+ for (pos = list_next_entry(pos, member); \
+ &pos->member != (head); \
+ pos = list_next_entry(pos, member))
/**
* list_for_each_entry_continue_reverse - iterate backwards from the given point
@@ -466,9 +493,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* the current position.
*/
#define list_for_each_entry_continue_reverse(pos, head, member) \
- for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
- &pos->member != (head); \
- pos = list_entry(pos->member.prev, typeof(*pos), member))
+ for (pos = list_prev_entry(pos, member); \
+ &pos->member != (head); \
+ pos = list_prev_entry(pos, member))
/**
* list_for_each_entry_from - iterate over list of given type from the current point
@@ -479,8 +506,8 @@ static inline void list_splice_tail_init(struct list_head *list,
* Iterate over list of given type, continuing from current position.
*/
#define list_for_each_entry_from(pos, head, member) \
- for (; &pos->member != (head); \
- pos = list_entry(pos->member.next, typeof(*pos), member))
+ for (; &pos->member != (head); \
+ pos = list_next_entry(pos, member))
/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
@@ -490,10 +517,10 @@ static inline void list_splice_tail_init(struct list_head *list,
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
- for (pos = list_entry((head)->next, typeof(*pos), member), \
- n = list_entry(pos->member.next, typeof(*pos), member); \
+ for (pos = list_first_entry(head, typeof(*pos), member), \
+ n = list_next_entry(pos, member); \
&pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
+ pos = n, n = list_next_entry(n, member))
/**
* list_for_each_entry_safe_continue - continue list iteration safe against removal
@@ -506,10 +533,10 @@ static inline void list_splice_tail_init(struct list_head *list,
* safe against removal of list entry.
*/
#define list_for_each_entry_safe_continue(pos, n, head, member) \
- for (pos = list_entry(pos->member.next, typeof(*pos), member), \
- n = list_entry(pos->member.next, typeof(*pos), member); \
+ for (pos = list_next_entry(pos, member), \
+ n = list_next_entry(pos, member); \
&pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
+ pos = n, n = list_next_entry(n, member))
/**
* list_for_each_entry_safe_from - iterate over list from current point safe against removal
@@ -522,9 +549,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* removal of list entry.
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
- for (n = list_entry(pos->member.next, typeof(*pos), member); \
+ for (n = list_next_entry(pos, member); \
&pos->member != (head); \
- pos = n, n = list_entry(n->member.next, typeof(*n), member))
+ pos = n, n = list_next_entry(n, member))
/**
* list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
@@ -537,10 +564,10 @@ static inline void list_splice_tail_init(struct list_head *list,
* of list entry.
*/
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
- for (pos = list_entry((head)->prev, typeof(*pos), member), \
- n = list_entry(pos->member.prev, typeof(*pos), member); \
+ for (pos = list_last_entry(head, typeof(*pos), member), \
+ n = list_prev_entry(pos, member); \
&pos->member != (head); \
- pos = n, n = list_entry(n->member.prev, typeof(*n), member))
+ pos = n, n = list_prev_entry(n, member))
/**
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop
@@ -555,7 +582,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* completing the current iteration of the loop body.
*/
#define list_safe_reset_next(pos, n, member) \
- n = list_entry(pos->member.next, typeof(*pos), member)
+ n = list_next_entry(pos, member)
/*
* Double linked lists with a single pointer list head.
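A small sketch using the new accessors, assuming a hypothetical item type: list_first_entry()/list_last_entry() require a non-empty list, and list_next_entry()/list_prev_entry() step relative to an existing entry.

#include <linux/list.h>

struct example_item {
	int val;
	struct list_head node;
};

/* Hypothetical helper: inspect both ends of a known non-empty list. */
static int example_span(struct list_head *head)
{
	struct example_item *first = list_first_entry(head, struct example_item, node);
	struct example_item *last = list_last_entry(head, struct example_item, node);

	return last->val - first->val;
}

/* Hypothetical helper: value of the entry after @pos, or -1 at the end. */
static int example_next_val(struct example_item *pos, struct list_head *head)
{
	struct example_item *next = list_next_entry(pos, node);

	return &next->node == head ? -1 : next->val;
}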
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
new file mode 100644
index 000000000000..3ce541753c88
--- /dev/null
+++ b/include/linux/list_lru.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
+ * Authors: David Chinner and Glauber Costa
+ *
+ * Generic LRU infrastructure
+ */
+#ifndef _LRU_LIST_H
+#define _LRU_LIST_H
+
+#include <linux/list.h>
+#include <linux/nodemask.h>
+
+/* list_lru_walk_cb must always return one of these */
+enum lru_status {
+ LRU_REMOVED, /* item removed from list */
+ LRU_ROTATE, /* item referenced, give another pass */
+ LRU_SKIP, /* item cannot be locked, skip */
+ LRU_RETRY, /* item not freeable. May drop the lock
+ internally, but has to return locked. */
+};
+
+struct list_lru_node {
+ spinlock_t lock;
+ struct list_head list;
+ /* kept as signed so we can catch imbalance bugs */
+ long nr_items;
+} ____cacheline_aligned_in_smp;
+
+struct list_lru {
+ struct list_lru_node *node;
+ nodemask_t active_nodes;
+};
+
+void list_lru_destroy(struct list_lru *lru);
+int list_lru_init(struct list_lru *lru);
+
+/**
+ * list_lru_add: add an element to the lru list's tail
+ * @list_lru: the lru pointer
+ * @item: the item to be added.
+ *
+ * If the element is already part of a list, this function returns without
+ * doing anything. The caller therefore does not need to track whether the
+ * element already belongs to the list and may update it lazily. Note,
+ * however, that this holds for *a* list, not *this* list: if the caller
+ * keeps elements on more than one type of list, it is up to the caller to
+ * fully remove the item from the previous list (with list_lru_del() for
+ * instance) before moving it to @list_lru.
+ *
+ * Return value: true if the list was updated, false otherwise
+ */
+bool list_lru_add(struct list_lru *lru, struct list_head *item);
+
+/**
+ * list_lru_del: delete an element from the lru list
+ * @list_lru: the lru pointer
+ * @item: the item to be deleted.
+ *
+ * This function works analogously to list_lru_add() in terms of list
+ * manipulation. The comments above about an element already belonging to
+ * a list also apply to list_lru_del().
+ *
+ * Return value: true if the list was updated, false otherwise
+ */
+bool list_lru_del(struct list_lru *lru, struct list_head *item);
+
+/**
+ * list_lru_count_node: return the number of objects currently held by @lru
+ * @lru: the lru pointer.
+ * @nid: the node id to count from.
+ *
+ * Always returns a non-negative number, 0 for empty lists. There is no
+ * guarantee that the list is not updated while the count is being computed.
+ * Callers that want such a guarantee need to provide an outer lock.
+ */
+unsigned long list_lru_count_node(struct list_lru *lru, int nid);
+static inline unsigned long list_lru_count(struct list_lru *lru)
+{
+ long count = 0;
+ int nid;
+
+ for_each_node_mask(nid, lru->active_nodes)
+ count += list_lru_count_node(lru, nid);
+
+ return count;
+}
+
+typedef enum lru_status
+(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
+/**
+ * list_lru_walk_node: walk a list_lru, isolating and disposing freeable items.
+ * @lru: the lru pointer.
+ * @nid: the node id to scan from.
+ * @isolate: callback function that is responsible for deciding what to do
+ * with the item currently being scanned
+ * @cb_arg: opaque type that will be passed to @isolate
+ * @nr_to_walk: how many items to scan.
+ *
+ * This function will scan all elements in a particular list_lru, calling the
+ * @isolate callback for each of those items, along with the current list
+ * spinlock and a caller-provided opaque argument. The @isolate callback can choose to
+ * drop the lock internally, but *must* return with the lock held. The callback
+ * will return an enum lru_status telling the list_lru infrastructure what to
+ * do with the object being scanned.
+ *
+ * Please note that nr_to_walk does not mean how many objects will be freed,
+ * just how many objects will be scanned.
+ *
+ * Return value: the number of objects effectively removed from the LRU.
+ */
+unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk);
+
+static inline unsigned long
+list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
+ void *cb_arg, unsigned long nr_to_walk)
+{
+ long isolated = 0;
+ int nid;
+
+ for_each_node_mask(nid, lru->active_nodes) {
+ isolated += list_lru_walk_node(lru, nid, isolate,
+ cb_arg, &nr_to_walk);
+ if (nr_to_walk <= 0)
+ break;
+ }
+ return isolated;
+}
+#endif /* _LRU_LIST_H */
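A hedged sketch of a walker built on this API (the callback and shrink helper are hypothetical): the isolate callback runs with the per-node lock held, removes the item itself, and reports what it did via enum lru_status.

#include <linux/list_lru.h>

/* Hypothetical isolate callback: move every item onto a dispose list. */
static enum lru_status example_isolate(struct list_head *item,
				       spinlock_t *lock, void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	list_move(item, dispose);	/* the node lock is held by the walker */
	return LRU_REMOVED;
}

static unsigned long example_shrink(struct list_lru *lru)
{
	LIST_HEAD(dispose);

	/* Scan at most 32 items across all active nodes. */
	return list_lru_walk(lru, example_isolate, &dispose, 32);
}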
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 8828a78dec9a..fbf10a0bc095 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -195,4 +195,6 @@ static inline struct llist_node *llist_del_all(struct llist_head *head)
extern struct llist_node *llist_del_first(struct llist_head *head);
+struct llist_node *llist_reverse_order(struct llist_node *head);
+
#endif /* LLIST_H */
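A short sketch of the new llist_reverse_order() helper in a hypothetical consumer: llist_del_all() hands back entries newest-first, and reversing restores submission order before processing.

#include <linux/llist.h>

static void example_drain_in_order(struct llist_head *head)
{
	struct llist_node *node;

	node = llist_reverse_order(llist_del_all(head));
	while (node) {
		struct llist_node *next = node->next;

		/* ... process the entry containing @node ... */
		node = next;
	}
}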
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index cfc2f119779a..92b1bfc5da60 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -497,6 +497,10 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i) lock_release(l, n, i)
+#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define seqcount_release(l, n, i) lock_release(l, n, i)
+
#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i) lock_release(l, n, i)
@@ -504,11 +508,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
-# define rwsem_release(l, n, i) lock_release(l, n, i)
+#define rwsem_release(l, n, i) lock_release(l, n, i)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
-# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
+#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index ca07b5028b01..c8929c3832db 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -15,10 +15,15 @@
*/
#include <linux/spinlock.h>
+#include <generated/bounds.h>
+
+#define USE_CMPXCHG_LOCKREF \
+ (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
+ IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS)
struct lockref {
union {
-#ifdef CONFIG_CMPXCHG_LOCKREF
+#if USE_CMPXCHG_LOCKREF
aligned_u64 lock_count;
#endif
struct {
@@ -33,4 +38,13 @@ extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);
+extern void lockref_mark_dead(struct lockref *);
+extern int lockref_get_not_dead(struct lockref *);
+
+/* Must be called under spinlock for reliable results */
+static inline int __lockref_is_dead(const struct lockref *l)
+{
+ return ((int)l->count < 0);
+}
+
#endif /* __LINUX_LOCKREF_H */
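A hedged sketch of the new dead-marking helpers in a hypothetical teardown path: once the lockref is marked dead (its count goes negative), later lockless lookups fail via lockref_get_not_dead().

#include <linux/lockref.h>

static void example_mark_dead(struct lockref *ref)
{
	spin_lock(&ref->lock);
	lockref_mark_dead(ref);		/* count becomes negative */
	spin_unlock(&ref->lock);
}

static bool example_tryget(struct lockref *ref)
{
	/* Fails (returns 0) once the lockref has been marked dead. */
	return lockref_get_not_dead(ref);
}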
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index d21c13f10a64..4356686b0a39 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -67,8 +67,8 @@ int lz4hc_compress(const unsigned char *src, size_t src_len,
* note : Destination buffer must be already allocated.
* slightly faster than lz4_decompress_unknownoutputsize()
*/
-int lz4_decompress(const char *src, size_t *src_len, char *dest,
- size_t actual_dest_len);
+int lz4_decompress(const unsigned char *src, size_t *src_len,
+ unsigned char *dest, size_t actual_dest_len);
/*
* lz4_decompress_unknownoutputsize()
@@ -82,6 +82,6 @@ int lz4_decompress(const char *src, size_t *src_len, char *dest,
* Error if return (< 0)
* note : Destination buffer must be already allocated.
*/
-int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
- char *dest, size_t *dest_len);
+int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
+ unsigned char *dest, size_t *dest_len);
#endif
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 2913b86eb12a..69ed5f5e9f6e 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -31,6 +31,15 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
}
/**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ */
+static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+}
+
+/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
*/
static inline u64 div64_u64(u64 dividend, u64 divisor)
@@ -63,6 +72,10 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif
+#ifndef div64_u64_rem
+extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
+#endif
+
#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif
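For illustration, the new div64_u64_rem() makes quotient-plus-remainder splits with a 64-bit divisor straightforward; the helper below is hypothetical.

#include <linux/math64.h>

/* Split a nanosecond count into whole seconds and the leftover nanoseconds. */
static u64 example_ns_to_sec(u64 ns, u64 *rem_ns)
{
	return div64_u64_rem(ns, 1000000000ULL, rem_ns);
}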
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index dba482e31a13..345b8c53b897 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -11,6 +11,8 @@
#ifndef __LINUX_MBUS_H
#define __LINUX_MBUS_H
+struct resource;
+
struct mbus_dram_target_info
{
/*
@@ -59,14 +61,18 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
}
#endif
-int mvebu_mbus_add_window_remap_flags(const char *devname, phys_addr_t base,
- size_t size, phys_addr_t remap,
- unsigned int flags);
-int mvebu_mbus_add_window(const char *devname, phys_addr_t base,
- size_t size);
+void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
+void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
+int mvebu_mbus_add_window_remap_by_id(unsigned int target,
+ unsigned int attribute,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap);
+int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
+ phys_addr_t base, size_t size);
int mvebu_mbus_del_window(phys_addr_t base, size_t size);
int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base,
size_t mbus_size, phys_addr_t sdram_phys_base,
size_t sdram_size);
+int mvebu_mbus_dt_init(void);
#endif /* __LINUX_MBUS_H */
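A hedged sketch of the new by-id window API; the target and attribute values below are placeholders, not real SoC numbers.

static int example_map_mbus_window(phys_addr_t base, size_t size)
{
	const unsigned int target = 0x04;	/* hypothetical target id */
	const unsigned int attribute = 0xe8;	/* hypothetical attribute */

	/* Non-remapped window; use ..._remap_by_id() when a remap is needed. */
	return mvebu_mbus_add_window_by_id(target, attribute, base, size);
}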
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f388203db7e8..77c60e52939d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -35,6 +35,7 @@ struct memblock_type {
};
struct memblock {
+ bool bottom_up; /* is bottom up direction? */
phys_addr_t current_limit;
struct memblock_type memory;
struct memblock_type reserved;
@@ -60,6 +61,8 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
+ unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
unsigned long *out_end_pfn, int *out_nid);
@@ -146,6 +149,29 @@ phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+#ifdef CONFIG_MOVABLE_NODE
+/*
+ * Set the allocation direction to bottom-up or top-down.
+ */
+static inline void memblock_set_bottom_up(bool enable)
+{
+ memblock.bottom_up = enable;
+}
+
+/*
+ * Check if the allocation direction is bottom-up or not.
+ * if this is true, that said, memblock will allocate memory
+ * in bottom-up direction.
+ */
+static inline bool memblock_bottom_up(void)
+{
+ return memblock.bottom_up;
+}
+#else
+static inline void memblock_set_bottom_up(bool enable) {}
+static inline bool memblock_bottom_up(void) { return false; }
+#endif
+
/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
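A hedged sketch of how the bottom-up switch might be flipped during early boot (the hook below is hypothetical); it only has an effect when CONFIG_MOVABLE_NODE provides the non-stub helpers.

static void __init example_prefer_bottom_up(void)
{
	/* Allocate low-to-high while hot-pluggable ranges are still unknown. */
	memblock_set_bottom_up(true);

	if (memblock_bottom_up())
		pr_info("memblock: allocating bottom-up\n");
}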
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c416092e324..b3e7a667e03c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -30,9 +30,21 @@ struct page;
struct mm_struct;
struct kmem_cache;
-/* Stats that can be updated by kernel. */
-enum mem_cgroup_page_stat_item {
- MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+/*
+ * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c.
+ * These two lists should be kept in sync with each other.
+ */
+enum mem_cgroup_stat_index {
+ /*
+ * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
+ */
+ MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
+ MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
+ MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
+ MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
+ MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
+ MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
+ MEM_CGROUP_STAT_NSTATS,
};
struct mem_cgroup_reclaim_cookie {
@@ -125,6 +137,25 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage);
+static inline void mem_cgroup_oom_enable(void)
+{
+ WARN_ON(current->memcg_oom.may_oom);
+ current->memcg_oom.may_oom = 1;
+}
+
+static inline void mem_cgroup_oom_disable(void)
+{
+ WARN_ON(!current->memcg_oom.may_oom);
+ current->memcg_oom.may_oom = 0;
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return p->memcg_oom.memcg;
+}
+
+bool mem_cgroup_oom_synchronize(bool wait);
+
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif
@@ -165,17 +196,17 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
}
void mem_cgroup_update_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx,
+ enum mem_cgroup_stat_index idx,
int val);
static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
mem_cgroup_update_page_stat(page, idx, 1);
}
static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
mem_cgroup_update_page_stat(page, idx, -1);
}
@@ -348,13 +379,31 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
{
}
+static inline void mem_cgroup_oom_enable(void)
+{
+}
+
+static inline void mem_cgroup_oom_disable(void)
+{
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_oom_synchronize(bool wait)
+{
+ return false;
+}
+
static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
}
static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
}
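A hedged sketch of the intended bracket around a user page fault (the fault helper is hypothetical; the real call sites live in mm/memory.c): memcg OOM handling is enabled only across the fault, and any pending memcg OOM is resolved after the bracket.

static int example_user_fault(struct mm_struct *mm, unsigned long address)
{
	int ret;

	mem_cgroup_oom_enable();
	ret = example_do_fault(mm, address);	/* hypothetical fault work */
	mem_cgroup_oom_disable();

	/* If the fault ran into a memcg OOM, clean up the recorded state. */
	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
		mem_cgroup_oom_synchronize(false);

	return ret;
}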
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index dd38e62b84d2..4ca3d951fe91 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -94,6 +94,8 @@ extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);
+extern int try_online_node(int nid);
+
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
extern int arch_remove_memory(u64 start, u64 size);
@@ -225,6 +227,11 @@ static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
+static inline int try_online_node(int nid)
+{
+ return 0;
+}
+
static inline void lock_memory_hotplug(void) {}
static inline void unlock_memory_hotplug(void) {}
@@ -256,14 +263,12 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
-extern int mem_online_node(int nid);
extern int add_memory(int nid, u64 start, u64 size);
extern int arch_add_memory(int nid, u64 start, u64 size);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern void remove_memory(int nid, u64 start, u64 size);
-extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
- int nr_pages);
+extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 0d7df39a5885..9fe426b30a41 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -91,7 +91,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
}
#define vma_policy(vma) ((vma)->vm_policy)
-#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
static inline void mpol_get(struct mempolicy *pol)
{
@@ -126,6 +125,7 @@ struct shared_policy {
spinlock_t lock;
};
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
struct vm_area_struct *vma,
@@ -136,6 +136,7 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
struct mempolicy *get_vma_policy(struct task_struct *tsk,
struct vm_area_struct *vma, unsigned long addr);
+bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma);
extern void numa_default_policy(void);
extern void numa_policy_init(void);
@@ -168,12 +169,12 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif
-extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
+extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
- if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP))
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP))
return 0;
/*
* Migration allocates pages in the highest zone. If we cannot
@@ -240,7 +241,12 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
}
#define vma_policy(vma) NULL
-#define vma_set_policy(vma, pol) do {} while(0)
+
+static inline int
+vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+ return 0;
+}
static inline void numa_policy_init(void)
{
@@ -301,9 +307,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
}
#endif
-static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+static inline void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
- return 0;
}
static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 4706d3d46e56..cb49417f8ba9 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -1908,7 +1908,7 @@
#define ARIZONA_FLL2_SYNC_GAIN_MASK 0x003c /* FLL2_SYNC_GAIN */
#define ARIZONA_FLL2_SYNC_GAIN_SHIFT 2 /* FLL2_SYNC_GAIN */
#define ARIZONA_FLL2_SYNC_GAIN_WIDTH 4 /* FLL2_SYNC_GAIN */
-#define ARIZONA_FLL2_SYNC_BW_MASK 0x0001 /* FLL2_SYNC_BW */
+#define ARIZONA_FLL2_SYNC_BW 0x0001 /* FLL2_SYNC_BW */
#define ARIZONA_FLL2_SYNC_BW_MASK 0x0001 /* FLL2_SYNC_BW */
#define ARIZONA_FLL2_SYNC_BW_SHIFT 0 /* FLL2_SYNC_BW */
#define ARIZONA_FLL2_SYNC_BW_WIDTH 1 /* FLL2_SYNC_BW */
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h
new file mode 100644
index 000000000000..16bf8a0dcd97
--- /dev/null
+++ b/include/linux/mfd/as3722.h
@@ -0,0 +1,423 @@
+/*
+ * as3722 definitions
+ *
+ * Copyright (C) 2013 ams
+ * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Author: Florian Lobmaier <florian.lobmaier@ams.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __LINUX_MFD_AS3722_H__
+#define __LINUX_MFD_AS3722_H__
+
+#include <linux/regmap.h>
+
+/* AS3722 registers */
+#define AS3722_SD0_VOLTAGE_REG 0x00
+#define AS3722_SD1_VOLTAGE_REG 0x01
+#define AS3722_SD2_VOLTAGE_REG 0x02
+#define AS3722_SD3_VOLTAGE_REG 0x03
+#define AS3722_SD4_VOLTAGE_REG 0x04
+#define AS3722_SD5_VOLTAGE_REG 0x05
+#define AS3722_SD6_VOLTAGE_REG 0x06
+#define AS3722_GPIO0_CONTROL_REG 0x08
+#define AS3722_GPIO1_CONTROL_REG 0x09
+#define AS3722_GPIO2_CONTROL_REG 0x0A
+#define AS3722_GPIO3_CONTROL_REG 0x0B
+#define AS3722_GPIO4_CONTROL_REG 0x0C
+#define AS3722_GPIO5_CONTROL_REG 0x0D
+#define AS3722_GPIO6_CONTROL_REG 0x0E
+#define AS3722_GPIO7_CONTROL_REG 0x0F
+#define AS3722_LDO0_VOLTAGE_REG 0x10
+#define AS3722_LDO1_VOLTAGE_REG 0x11
+#define AS3722_LDO2_VOLTAGE_REG 0x12
+#define AS3722_LDO3_VOLTAGE_REG 0x13
+#define AS3722_LDO4_VOLTAGE_REG 0x14
+#define AS3722_LDO5_VOLTAGE_REG 0x15
+#define AS3722_LDO6_VOLTAGE_REG 0x16
+#define AS3722_LDO7_VOLTAGE_REG 0x17
+#define AS3722_LDO9_VOLTAGE_REG 0x19
+#define AS3722_LDO10_VOLTAGE_REG 0x1A
+#define AS3722_LDO11_VOLTAGE_REG 0x1B
+#define AS3722_GPIO_DEB1_REG 0x1E
+#define AS3722_GPIO_DEB2_REG 0x1F
+#define AS3722_GPIO_SIGNAL_OUT_REG 0x20
+#define AS3722_GPIO_SIGNAL_IN_REG 0x21
+#define AS3722_REG_SEQU_MOD1_REG 0x22
+#define AS3722_REG_SEQU_MOD2_REG 0x23
+#define AS3722_REG_SEQU_MOD3_REG 0x24
+#define AS3722_SD_PHSW_CTRL_REG 0x27
+#define AS3722_SD_PHSW_STATUS 0x28
+#define AS3722_SD0_CONTROL_REG 0x29
+#define AS3722_SD1_CONTROL_REG 0x2A
+#define AS3722_SDmph_CONTROL_REG 0x2B
+#define AS3722_SD23_CONTROL_REG 0x2C
+#define AS3722_SD4_CONTROL_REG 0x2D
+#define AS3722_SD5_CONTROL_REG 0x2E
+#define AS3722_SD6_CONTROL_REG 0x2F
+#define AS3722_SD_DVM_REG 0x30
+#define AS3722_RESET_REASON_REG 0x31
+#define AS3722_BATTERY_VOLTAGE_MONITOR_REG 0x32
+#define AS3722_STARTUP_CONTROL_REG 0x33
+#define AS3722_RESET_TIMER_REG 0x34
+#define AS3722_REFERENCE_CONTROL_REG 0x35
+#define AS3722_RESET_CONTROL_REG 0x36
+#define AS3722_OVER_TEMP_CONTROL_REG 0x37
+#define AS3722_WATCHDOG_CONTROL_REG 0x38
+#define AS3722_REG_STANDBY_MOD1_REG 0x39
+#define AS3722_REG_STANDBY_MOD2_REG 0x3A
+#define AS3722_REG_STANDBY_MOD3_REG 0x3B
+#define AS3722_ENABLE_CTRL1_REG 0x3C
+#define AS3722_ENABLE_CTRL2_REG 0x3D
+#define AS3722_ENABLE_CTRL3_REG 0x3E
+#define AS3722_ENABLE_CTRL4_REG 0x3F
+#define AS3722_ENABLE_CTRL5_REG 0x40
+#define AS3722_PWM_CONTROL_L_REG 0x41
+#define AS3722_PWM_CONTROL_H_REG 0x42
+#define AS3722_WATCHDOG_TIMER_REG 0x46
+#define AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG 0x48
+#define AS3722_IOVOLTAGE_REG 0x49
+#define AS3722_BATTERY_VOLTAGE_MONITOR2_REG 0x4A
+#define AS3722_SD_CONTROL_REG 0x4D
+#define AS3722_LDOCONTROL0_REG 0x4E
+#define AS3722_LDOCONTROL1_REG 0x4F
+#define AS3722_SD0_PROTECT_REG 0x50
+#define AS3722_SD6_PROTECT_REG 0x51
+#define AS3722_PWM_VCONTROL1_REG 0x52
+#define AS3722_PWM_VCONTROL2_REG 0x53
+#define AS3722_PWM_VCONTROL3_REG 0x54
+#define AS3722_PWM_VCONTROL4_REG 0x55
+#define AS3722_BB_CHARGER_REG 0x57
+#define AS3722_CTRL_SEQU1_REG 0x58
+#define AS3722_CTRL_SEQU2_REG 0x59
+#define AS3722_OVCURRENT_REG 0x5A
+#define AS3722_OVCURRENT_DEB_REG 0x5B
+#define AS3722_SDLV_DEB_REG 0x5C
+#define AS3722_OC_PG_CTRL_REG 0x5D
+#define AS3722_OC_PG_CTRL2_REG 0x5E
+#define AS3722_CTRL_STATUS 0x5F
+#define AS3722_RTC_CONTROL_REG 0x60
+#define AS3722_RTC_SECOND_REG 0x61
+#define AS3722_RTC_MINUTE_REG 0x62
+#define AS3722_RTC_HOUR_REG 0x63
+#define AS3722_RTC_DAY_REG 0x64
+#define AS3722_RTC_MONTH_REG 0x65
+#define AS3722_RTC_YEAR_REG 0x66
+#define AS3722_RTC_ALARM_SECOND_REG 0x67
+#define AS3722_RTC_ALARM_MINUTE_REG 0x68
+#define AS3722_RTC_ALARM_HOUR_REG 0x69
+#define AS3722_RTC_ALARM_DAY_REG 0x6A
+#define AS3722_RTC_ALARM_MONTH_REG 0x6B
+#define AS3722_RTC_ALARM_YEAR_REG 0x6C
+#define AS3722_SRAM_REG 0x6D
+#define AS3722_RTC_ACCESS_REG 0x6F
+#define AS3722_RTC_STATUS_REG 0x73
+#define AS3722_INTERRUPT_MASK1_REG 0x74
+#define AS3722_INTERRUPT_MASK2_REG 0x75
+#define AS3722_INTERRUPT_MASK3_REG 0x76
+#define AS3722_INTERRUPT_MASK4_REG 0x77
+#define AS3722_INTERRUPT_STATUS1_REG 0x78
+#define AS3722_INTERRUPT_STATUS2_REG 0x79
+#define AS3722_INTERRUPT_STATUS3_REG 0x7A
+#define AS3722_INTERRUPT_STATUS4_REG 0x7B
+#define AS3722_TEMP_STATUS_REG 0x7D
+#define AS3722_ADC0_CONTROL_REG 0x80
+#define AS3722_ADC1_CONTROL_REG 0x81
+#define AS3722_ADC0_MSB_RESULT_REG 0x82
+#define AS3722_ADC0_LSB_RESULT_REG 0x83
+#define AS3722_ADC1_MSB_RESULT_REG 0x84
+#define AS3722_ADC1_LSB_RESULT_REG 0x85
+#define AS3722_ADC1_THRESHOLD_HI_MSB_REG 0x86
+#define AS3722_ADC1_THRESHOLD_HI_LSB_REG 0x87
+#define AS3722_ADC1_THRESHOLD_LO_MSB_REG 0x88
+#define AS3722_ADC1_THRESHOLD_LO_LSB_REG 0x89
+#define AS3722_ADC_CONFIGURATION_REG 0x8A
+#define AS3722_ASIC_ID1_REG 0x90
+#define AS3722_ASIC_ID2_REG 0x91
+#define AS3722_LOCK_REG 0x9E
+#define AS3722_MAX_REGISTER 0xF4
+
+#define AS3722_SD0_EXT_ENABLE_MASK 0x03
+#define AS3722_SD1_EXT_ENABLE_MASK 0x0C
+#define AS3722_SD2_EXT_ENABLE_MASK 0x30
+#define AS3722_SD3_EXT_ENABLE_MASK 0xC0
+#define AS3722_SD4_EXT_ENABLE_MASK 0x03
+#define AS3722_SD5_EXT_ENABLE_MASK 0x0C
+#define AS3722_SD6_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO0_EXT_ENABLE_MASK 0x03
+#define AS3722_LDO1_EXT_ENABLE_MASK 0x0C
+#define AS3722_LDO2_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO3_EXT_ENABLE_MASK 0xC0
+#define AS3722_LDO4_EXT_ENABLE_MASK 0x03
+#define AS3722_LDO5_EXT_ENABLE_MASK 0x0C
+#define AS3722_LDO6_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO7_EXT_ENABLE_MASK 0xC0
+#define AS3722_LDO9_EXT_ENABLE_MASK 0x0C
+#define AS3722_LDO10_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO11_EXT_ENABLE_MASK 0xC0
+
+#define AS3722_OVCURRENT_SD0_ALARM_MASK 0x07
+#define AS3722_OVCURRENT_SD0_ALARM_SHIFT 0x01
+#define AS3722_OVCURRENT_SD0_TRIP_MASK 0x18
+#define AS3722_OVCURRENT_SD0_TRIP_SHIFT 0x03
+#define AS3722_OVCURRENT_SD1_TRIP_MASK 0x60
+#define AS3722_OVCURRENT_SD1_TRIP_SHIFT 0x05
+
+#define AS3722_OVCURRENT_SD6_ALARM_MASK 0x07
+#define AS3722_OVCURRENT_SD6_ALARM_SHIFT 0x01
+#define AS3722_OVCURRENT_SD6_TRIP_MASK 0x18
+#define AS3722_OVCURRENT_SD6_TRIP_SHIFT 0x03
+
+/* AS3722 register bits and bit masks */
+#define AS3722_LDO_ILIMIT_MASK BIT(7)
+#define AS3722_LDO_ILIMIT_BIT BIT(7)
+#define AS3722_LDO0_VSEL_MASK 0x1F
+#define AS3722_LDO0_VSEL_MIN 0x01
+#define AS3722_LDO0_VSEL_MAX 0x12
+#define AS3722_LDO0_NUM_VOLT 0x12
+#define AS3722_LDO3_VSEL_MASK 0x3F
+#define AS3722_LDO3_VSEL_MIN 0x01
+#define AS3722_LDO3_VSEL_MAX 0x2D
+#define AS3722_LDO3_NUM_VOLT 0x2D
+#define AS3722_LDO_VSEL_MASK 0x7F
+#define AS3722_LDO_VSEL_MIN 0x01
+#define AS3722_LDO_VSEL_MAX 0x7F
+#define AS3722_LDO_VSEL_DNU_MIN 0x25
+#define AS3722_LDO_VSEL_DNU_MAX 0x3F
+#define AS3722_LDO_NUM_VOLT 0x80
+
+#define AS3722_LDO0_CTRL BIT(0)
+#define AS3722_LDO1_CTRL BIT(1)
+#define AS3722_LDO2_CTRL BIT(2)
+#define AS3722_LDO3_CTRL BIT(3)
+#define AS3722_LDO4_CTRL BIT(4)
+#define AS3722_LDO5_CTRL BIT(5)
+#define AS3722_LDO6_CTRL BIT(6)
+#define AS3722_LDO7_CTRL BIT(7)
+#define AS3722_LDO9_CTRL BIT(1)
+#define AS3722_LDO10_CTRL BIT(2)
+#define AS3722_LDO11_CTRL BIT(3)
+
+#define AS3722_LDO3_MODE_MASK (3 << 6)
+#define AS3722_LDO3_MODE_VAL(n) (((n) & 0x3) << 6)
+#define AS3722_LDO3_MODE_PMOS AS3722_LDO3_MODE_VAL(0)
+#define AS3722_LDO3_MODE_PMOS_TRACKING AS3722_LDO3_MODE_VAL(1)
+#define AS3722_LDO3_MODE_NMOS AS3722_LDO3_MODE_VAL(2)
+#define AS3722_LDO3_MODE_SWITCH AS3722_LDO3_MODE_VAL(3)
+
+#define AS3722_SD_VSEL_MASK 0x7F
+#define AS3722_SD0_VSEL_MIN 0x01
+#define AS3722_SD0_VSEL_MAX 0x5A
+#define AS3722_SD2_VSEL_MIN 0x01
+#define AS3722_SD2_VSEL_MAX 0x7F
+
+#define AS3722_SDn_CTRL(n) BIT(n)
+
+#define AS3722_SD0_MODE_FAST BIT(4)
+#define AS3722_SD1_MODE_FAST BIT(4)
+#define AS3722_SD2_MODE_FAST BIT(2)
+#define AS3722_SD3_MODE_FAST BIT(6)
+#define AS3722_SD4_MODE_FAST BIT(2)
+#define AS3722_SD5_MODE_FAST BIT(2)
+#define AS3722_SD6_MODE_FAST BIT(4)
+
+#define AS3722_POWER_OFF BIT(1)
+
+#define AS3722_INTERRUPT_MASK1_LID BIT(0)
+#define AS3722_INTERRUPT_MASK1_ACOK BIT(1)
+#define AS3722_INTERRUPT_MASK1_ENABLE1 BIT(2)
+#define AS3722_INTERRUPT_MASK1_OCURR_ALARM_SD0 BIT(3)
+#define AS3722_INTERRUPT_MASK1_ONKEY_LONG BIT(4)
+#define AS3722_INTERRUPT_MASK1_ONKEY BIT(5)
+#define AS3722_INTERRUPT_MASK1_OVTMP BIT(6)
+#define AS3722_INTERRUPT_MASK1_LOWBAT BIT(7)
+
+#define AS3722_INTERRUPT_MASK2_SD0_LV BIT(0)
+#define AS3722_INTERRUPT_MASK2_SD1_LV BIT(1)
+#define AS3722_INTERRUPT_MASK2_SD2345_LV BIT(2)
+#define AS3722_INTERRUPT_MASK2_PWM1_OV_PROT BIT(3)
+#define AS3722_INTERRUPT_MASK2_PWM2_OV_PROT BIT(4)
+#define AS3722_INTERRUPT_MASK2_ENABLE2 BIT(5)
+#define AS3722_INTERRUPT_MASK2_SD6_LV BIT(6)
+#define AS3722_INTERRUPT_MASK2_RTC_REP BIT(7)
+
+#define AS3722_INTERRUPT_MASK3_RTC_ALARM BIT(0)
+#define AS3722_INTERRUPT_MASK3_GPIO1 BIT(1)
+#define AS3722_INTERRUPT_MASK3_GPIO2 BIT(2)
+#define AS3722_INTERRUPT_MASK3_GPIO3 BIT(3)
+#define AS3722_INTERRUPT_MASK3_GPIO4 BIT(4)
+#define AS3722_INTERRUPT_MASK3_GPIO5 BIT(5)
+#define AS3722_INTERRUPT_MASK3_WATCHDOG BIT(6)
+#define AS3722_INTERRUPT_MASK3_ENABLE3 BIT(7)
+
+#define AS3722_INTERRUPT_MASK4_TEMP_SD0_SHUTDOWN BIT(0)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD1_SHUTDOWN BIT(1)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD6_SHUTDOWN BIT(2)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD0_ALARM BIT(3)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD1_ALARM BIT(4)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD6_ALARM BIT(5)
+#define AS3722_INTERRUPT_MASK4_OCCUR_ALARM_SD6 BIT(6)
+#define AS3722_INTERRUPT_MASK4_ADC BIT(7)
+
+#define AS3722_ADC1_INTERVAL_TIME BIT(0)
+#define AS3722_ADC1_INT_MODE_ON BIT(1)
+#define AS3722_ADC_BUF_ON BIT(2)
+#define AS3722_ADC1_LOW_VOLTAGE_RANGE BIT(5)
+#define AS3722_ADC1_INTEVAL_SCAN BIT(6)
+#define AS3722_ADC1_INT_MASK BIT(7)
+
+#define AS3722_ADC_MSB_VAL_MASK 0x7F
+#define AS3722_ADC_LSB_VAL_MASK 0x07
+
+#define AS3722_ADC0_CONV_START BIT(7)
+#define AS3722_ADC0_CONV_NOTREADY BIT(7)
+#define AS3722_ADC0_SOURCE_SELECT_MASK 0x1F
+
+#define AS3722_ADC1_CONV_START BIT(7)
+#define AS3722_ADC1_CONV_NOTREADY BIT(7)
+#define AS3722_ADC1_SOURCE_SELECT_MASK 0x1F
+
+/* GPIO modes */
+#define AS3722_GPIO_MODE_MASK 0x07
+#define AS3722_GPIO_MODE_INPUT 0x00
+#define AS3722_GPIO_MODE_OUTPUT_VDDH 0x01
+#define AS3722_GPIO_MODE_IO_OPEN_DRAIN 0x02
+#define AS3722_GPIO_MODE_ADC_IN 0x03
+#define AS3722_GPIO_MODE_INPUT_PULL_UP 0x04
+#define AS3722_GPIO_MODE_INPUT_PULL_DOWN 0x05
+#define AS3722_GPIO_MODE_IO_OPEN_DRAIN_PULL_UP 0x06
+#define AS3722_GPIO_MODE_OUTPUT_VDDL 0x07
+#define AS3722_GPIO_MODE_VAL(n) ((n) & AS3722_GPIO_MODE_MASK)
+
+#define AS3722_GPIO_INV BIT(7)
+#define AS3722_GPIO_IOSF_MASK 0x78
+#define AS3722_GPIO_IOSF_VAL(n) (((n) & 0xF) << 3)
+#define AS3722_GPIO_IOSF_NORMAL AS3722_GPIO_IOSF_VAL(0)
+#define AS3722_GPIO_IOSF_INTERRUPT_OUT AS3722_GPIO_IOSF_VAL(1)
+#define AS3722_GPIO_IOSF_VSUP_LOW_OUT AS3722_GPIO_IOSF_VAL(2)
+#define AS3722_GPIO_IOSF_GPIO_INTERRUPT_IN AS3722_GPIO_IOSF_VAL(3)
+#define AS3722_GPIO_IOSF_ISINK_PWM_IN AS3722_GPIO_IOSF_VAL(4)
+#define AS3722_GPIO_IOSF_VOLTAGE_STBY AS3722_GPIO_IOSF_VAL(5)
+#define AS3722_GPIO_IOSF_PWR_GOOD_OUT AS3722_GPIO_IOSF_VAL(7)
+#define AS3722_GPIO_IOSF_Q32K_OUT AS3722_GPIO_IOSF_VAL(8)
+#define AS3722_GPIO_IOSF_WATCHDOG_IN AS3722_GPIO_IOSF_VAL(9)
+#define AS3722_GPIO_IOSF_SOFT_RESET_IN AS3722_GPIO_IOSF_VAL(11)
+#define AS3722_GPIO_IOSF_PWM_OUT AS3722_GPIO_IOSF_VAL(12)
+#define AS3722_GPIO_IOSF_VSUP_LOW_DEB_OUT AS3722_GPIO_IOSF_VAL(13)
+#define AS3722_GPIO_IOSF_SD6_LOW_VOLT_LOW AS3722_GPIO_IOSF_VAL(14)
+
+#define AS3722_GPIOn_SIGNAL(n) BIT(n)
+#define AS3722_GPIOn_CONTROL_REG(n) (AS3722_GPIO0_CONTROL_REG + n)
+#define AS3722_I2C_PULL_UP BIT(4)
+#define AS3722_INT_PULL_UP BIT(5)
+
+#define AS3722_RTC_REP_WAKEUP_EN BIT(0)
+#define AS3722_RTC_ALARM_WAKEUP_EN BIT(1)
+#define AS3722_RTC_ON BIT(2)
+#define AS3722_RTC_IRQMODE BIT(3)
+#define AS3722_RTC_CLK32K_OUT_EN BIT(5)
+
+#define AS3722_WATCHDOG_TIMER_MAX 0x7F
+#define AS3722_WATCHDOG_ON BIT(0)
+#define AS3722_WATCHDOG_SW_SIG BIT(0)
+
+#define AS3722_EXT_CONTROL_ENABLE1 0x1
+#define AS3722_EXT_CONTROL_ENABLE2 0x2
+#define AS3722_EXT_CONTROL_ENABLE3 0x3
+
+/* Interrupt IDs */
+enum as3722_irq {
+ AS3722_IRQ_LID,
+ AS3722_IRQ_ACOK,
+ AS3722_IRQ_ENABLE1,
+ AS3722_IRQ_OCCUR_ALARM_SD0,
+ AS3722_IRQ_ONKEY_LONG_PRESS,
+ AS3722_IRQ_ONKEY,
+ AS3722_IRQ_OVTMP,
+ AS3722_IRQ_LOWBAT,
+ AS3722_IRQ_SD0_LV,
+ AS3722_IRQ_SD1_LV,
+ AS3722_IRQ_SD2_LV,
+ AS3722_IRQ_PWM1_OV_PROT,
+ AS3722_IRQ_PWM2_OV_PROT,
+ AS3722_IRQ_ENABLE2,
+ AS3722_IRQ_SD6_LV,
+ AS3722_IRQ_RTC_REP,
+ AS3722_IRQ_RTC_ALARM,
+ AS3722_IRQ_GPIO1,
+ AS3722_IRQ_GPIO2,
+ AS3722_IRQ_GPIO3,
+ AS3722_IRQ_GPIO4,
+ AS3722_IRQ_GPIO5,
+ AS3722_IRQ_WATCHDOG,
+ AS3722_IRQ_ENABLE3,
+ AS3722_IRQ_TEMP_SD0_SHUTDOWN,
+ AS3722_IRQ_TEMP_SD1_SHUTDOWN,
+ AS3722_IRQ_TEMP_SD2_SHUTDOWN,
+ AS3722_IRQ_TEMP_SD0_ALARM,
+ AS3722_IRQ_TEMP_SD1_ALARM,
+ AS3722_IRQ_TEMP_SD6_ALARM,
+ AS3722_IRQ_OCCUR_ALARM_SD6,
+ AS3722_IRQ_ADC,
+ AS3722_IRQ_MAX,
+};
+
+struct as3722 {
+ struct device *dev;
+ struct regmap *regmap;
+ int chip_irq;
+ unsigned long irq_flags;
+ bool en_intern_int_pullup;
+ bool en_intern_i2c_pullup;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+static inline int as3722_read(struct as3722 *as3722, u32 reg, u32 *dest)
+{
+ return regmap_read(as3722->regmap, reg, dest);
+}
+
+static inline int as3722_write(struct as3722 *as3722, u32 reg, u32 value)
+{
+ return regmap_write(as3722->regmap, reg, value);
+}
+
+static inline int as3722_block_read(struct as3722 *as3722, u32 reg,
+ int count, u8 *buf)
+{
+ return regmap_bulk_read(as3722->regmap, reg, buf, count);
+}
+
+static inline int as3722_block_write(struct as3722 *as3722, u32 reg,
+ int count, u8 *data)
+{
+ return regmap_bulk_write(as3722->regmap, reg, data, count);
+}
+
+static inline int as3722_update_bits(struct as3722 *as3722, u32 reg,
+ u32 mask, u8 val)
+{
+ return regmap_update_bits(as3722->regmap, reg, mask, val);
+}
+
+static inline int as3722_irq_get_virq(struct as3722 *as3722, int irq)
+{
+ return regmap_irq_get_virq(as3722->irq_data, irq);
+}
+#endif /* __LINUX_MFD_AS3722_H__ */
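A hedged sketch of the regmap wrappers from a hypothetical sub-driver: read an ID register, then flip one enable bit through as3722_update_bits() without disturbing the neighbouring fields.

static int example_enable_ldo3(struct as3722 *as3722)
{
	u32 id;
	int ret;

	ret = as3722_read(as3722, AS3722_ASIC_ID1_REG, &id);
	if (ret < 0)
		return ret;

	/* Set only the LDO3 enable bit in LDOCONTROL0. */
	return as3722_update_bits(as3722, AS3722_LDOCONTROL0_REG,
				  AS3722_LDO3_CTRL, AS3722_LDO3_CTRL);
}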
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index cebe97ee98b8..bdba8c61207b 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -59,6 +59,12 @@ struct mfd_cell {
* pm_runtime_no_callbacks().
*/
bool pm_runtime_no_callbacks;
+
+ /* A list of regulator supplies that should be mapped to the MFD
+ * device rather than the child device when requested
+ */
+ const char **parent_supplies;
+ int num_parent_supplies;
};
/*
@@ -98,7 +104,7 @@ static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
}
extern int mfd_add_devices(struct device *parent, int id,
- struct mfd_cell *cells, int n_devs,
+ const struct mfd_cell *cells, int n_devs,
struct resource *mem_base,
int irq_base, struct irq_domain *irq_domain);
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 786d02eb79d2..21e21b81cc75 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -148,10 +148,15 @@ static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
unsigned reg_cnt, unsigned char *val)
{
int ret;
+ unsigned int tmp;
+ int i;
- ret = regmap_bulk_read(da9052->regmap, reg, val, reg_cnt);
- if (ret < 0)
- return ret;
+ for (i = 0; i < reg_cnt; i++) {
+ ret = regmap_read(da9052->regmap, reg + i, &tmp);
+ val[i] = (unsigned char)tmp;
+ if (ret < 0)
+ return ret;
+ }
if (da9052->fix_io) {
ret = da9052->fix_io(da9052, reg);
@@ -166,10 +171,13 @@ static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
unsigned reg_cnt, unsigned char *val)
{
int ret;
+ int i;
- ret = regmap_raw_write(da9052->regmap, reg, val, reg_cnt);
- if (ret < 0)
- return ret;
+ for (i = 0; i < reg_cnt; i++) {
+ ret = regmap_write(da9052->regmap, reg + i, val[i]);
+ if (ret < 0)
+ return ret;
+ }
if (da9052->fix_io) {
ret = da9052->fix_io(da9052, reg);
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
new file mode 100644
index 000000000000..2d2a0af675fd
--- /dev/null
+++ b/include/linux/mfd/da9063/core.h
@@ -0,0 +1,93 @@
+/*
+ * Definitions for DA9063 MFD driver
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ * Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_DA9063_CORE_H__
+#define __MFD_DA9063_CORE_H__
+
+#include <linux/interrupt.h>
+#include <linux/mfd/da9063/registers.h>
+
+/* DA9063 modules */
+#define DA9063_DRVNAME_CORE "da9063-core"
+#define DA9063_DRVNAME_REGULATORS "da9063-regulators"
+#define DA9063_DRVNAME_LEDS "da9063-leds"
+#define DA9063_DRVNAME_WATCHDOG "da9063-watchdog"
+#define DA9063_DRVNAME_HWMON "da9063-hwmon"
+#define DA9063_DRVNAME_ONKEY "da9063-onkey"
+#define DA9063_DRVNAME_RTC "da9063-rtc"
+#define DA9063_DRVNAME_VIBRATION "da9063-vibration"
+
+enum da9063_models {
+ PMIC_DA9063 = 0x61,
+};
+
+/* Interrupts */
+enum da9063_irqs {
+ DA9063_IRQ_ONKEY = 0,
+ DA9063_IRQ_ALARM,
+ DA9063_IRQ_TICK,
+ DA9063_IRQ_ADC_RDY,
+ DA9063_IRQ_SEQ_RDY,
+ DA9063_IRQ_WAKE,
+ DA9063_IRQ_TEMP,
+ DA9063_IRQ_COMP_1V2,
+ DA9063_IRQ_LDO_LIM,
+ DA9063_IRQ_REG_UVOV,
+ DA9063_IRQ_VDD_MON,
+ DA9063_IRQ_WARN,
+ DA9063_IRQ_GPI0,
+ DA9063_IRQ_GPI1,
+ DA9063_IRQ_GPI2,
+ DA9063_IRQ_GPI3,
+ DA9063_IRQ_GPI4,
+ DA9063_IRQ_GPI5,
+ DA9063_IRQ_GPI6,
+ DA9063_IRQ_GPI7,
+ DA9063_IRQ_GPI8,
+ DA9063_IRQ_GPI9,
+ DA9063_IRQ_GPI10,
+ DA9063_IRQ_GPI11,
+ DA9063_IRQ_GPI12,
+ DA9063_IRQ_GPI13,
+ DA9063_IRQ_GPI14,
+ DA9063_IRQ_GPI15,
+};
+
+#define DA9063_IRQ_BASE_OFFSET 0
+#define DA9063_NUM_IRQ (DA9063_IRQ_GPI15 + 1 - DA9063_IRQ_BASE_OFFSET)
+
+struct da9063 {
+ /* Device */
+ struct device *dev;
+ unsigned short model;
+ unsigned short revision;
+ unsigned int flags;
+
+ /* Control interface */
+ struct regmap *regmap;
+
+ /* Interrupts */
+ int chip_irq;
+ unsigned int irq_base;
+ struct regmap_irq_chip_data *regmap_irq;
+};
+
+int da9063_device_init(struct da9063 *da9063, unsigned int irq);
+int da9063_irq_init(struct da9063 *da9063);
+
+void da9063_device_exit(struct da9063 *da9063);
+void da9063_irq_exit(struct da9063 *da9063);
+
+#endif /* __MFD_DA9063_CORE_H__ */
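
Child drivers typically map the DA9063_IRQ_* numbers above to Linux virtual IRQs through the regmap-irq data stored in struct da9063. A hedged sketch, assuming the core driver has set up regmap_irq and exported its state as the parent's drvdata; the function and request names are illustrative:

static int example_da9063_request_alarm_irq(struct platform_device *pdev,
					    irq_handler_t handler)
{
	struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent);
	int irq;

	irq = regmap_irq_get_virq(da9063->regmap_irq, DA9063_IRQ_ALARM);
	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(&pdev->dev, irq, NULL, handler,
					 IRQF_ONESHOT, "da9063-alarm", pdev);
}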
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
new file mode 100644
index 000000000000..95c8742215a7
--- /dev/null
+++ b/include/linux/mfd/da9063/pdata.h
@@ -0,0 +1,111 @@
+/*
+ * Platform configuration options for DA9063
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_DA9063_PDATA_H__
+#define __MFD_DA9063_PDATA_H__
+
+#include <linux/regulator/machine.h>
+
+/*
+ * Regulator configuration
+ */
+/* DA9063 regulator IDs */
+enum {
+ /* BUCKs */
+ DA9063_ID_BCORE1,
+ DA9063_ID_BCORE2,
+ DA9063_ID_BPRO,
+ DA9063_ID_BMEM,
+ DA9063_ID_BIO,
+ DA9063_ID_BPERI,
+
+ /* BCORE1 and BCORE2 in merged mode */
+ DA9063_ID_BCORES_MERGED,
+ /* BMEM and BIO in merged mode */
+ DA9063_ID_BMEM_BIO_MERGED,
+ /* When two BUCKs are merged, they cannot be reused separately */
+
+ /* LDOs */
+ DA9063_ID_LDO1,
+ DA9063_ID_LDO2,
+ DA9063_ID_LDO3,
+ DA9063_ID_LDO4,
+ DA9063_ID_LDO5,
+ DA9063_ID_LDO6,
+ DA9063_ID_LDO7,
+ DA9063_ID_LDO8,
+ DA9063_ID_LDO9,
+ DA9063_ID_LDO10,
+ DA9063_ID_LDO11,
+};
+
+/* Regulators platform data */
+struct da9063_regulator_data {
+ int id;
+ struct regulator_init_data *initdata;
+};
+
+struct da9063_regulators_pdata {
+ unsigned n_regulators;
+ struct da9063_regulator_data *regulator_data;
+};
+
+
+/*
+ * RGB LED configuration
+ */
+/* LED IDs for flags in struct led_info. */
+enum {
+ DA9063_GPIO11_LED,
+ DA9063_GPIO14_LED,
+ DA9063_GPIO15_LED,
+
+ DA9063_LED_NUM
+};
+#define DA9063_LED_ID_MASK 0x3
+
+/* LED polarity for flags in struct led_info. */
+#define DA9063_LED_HIGH_LEVEL_ACTIVE 0x0
+#define DA9063_LED_LOW_LEVEL_ACTIVE 0x4
+
+
+/*
+ * General PMIC configuration
+ */
+/* HWMON ADC channels configuration */
+#define DA9063_FLG_FORCE_IN0_MANUAL_MODE 0x0010
+#define DA9063_FLG_FORCE_IN0_AUTO_MODE 0x0020
+#define DA9063_FLG_FORCE_IN1_MANUAL_MODE 0x0040
+#define DA9063_FLG_FORCE_IN1_AUTO_MODE 0x0080
+#define DA9063_FLG_FORCE_IN2_MANUAL_MODE 0x0100
+#define DA9063_FLG_FORCE_IN2_AUTO_MODE 0x0200
+#define DA9063_FLG_FORCE_IN3_MANUAL_MODE 0x0400
+#define DA9063_FLG_FORCE_IN3_AUTO_MODE 0x0800
+
+/* Disable register caching. */
+#define DA9063_FLG_NO_CACHE 0x0008
+
+struct da9063;
+
+/* DA9063 platform data */
+struct da9063_pdata {
+ int (*init)(struct da9063 *da9063);
+ int irq_base;
+ unsigned flags;
+ struct da9063_regulators_pdata *regulators_pdata;
+ struct led_platform_data *leds_pdata;
+};
+
+#endif /* __MFD_DA9063_PDATA_H__ */
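
Board code hands regulator constraints to the DA9063 core through these structures. A hedged board-file sketch; the chosen regulator, voltage limits and symbol names are placeholders, not values from this patch:

static struct regulator_init_data example_bcore1_initdata = {
	.constraints = {
		.min_uV		= 900000,
		.max_uV		= 1200000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
};

static struct da9063_regulator_data example_regulator_data[] = {
	{ .id = DA9063_ID_BCORE1, .initdata = &example_bcore1_initdata },
};

static struct da9063_regulators_pdata example_regulators_pdata = {
	.n_regulators	= ARRAY_SIZE(example_regulator_data),
	.regulator_data	= example_regulator_data,
};

static struct da9063_pdata example_da9063_pdata = {
	.regulators_pdata = &example_regulators_pdata,
};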
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
new file mode 100644
index 000000000000..5834813fb5f3
--- /dev/null
+++ b/include/linux/mfd/da9063/registers.h
@@ -0,0 +1,1028 @@
+/*
+ * Registers definition for DA9063 modules
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ * Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _DA9063_REG_H
+#define _DA9063_REG_H
+
+#define DA9063_I2C_PAGE_SEL_SHIFT 1
+
+#define DA9063_EVENT_REG_NUM 4
+#define DA9210_EVENT_REG_NUM 2
+#define DA9063_EXT_EVENT_REG_NUM (DA9063_EVENT_REG_NUM + \
+ DA9210_EVENT_REG_NUM)
+
+/* The page selection register (I2C or SPI) is always at the beginning of each page. */
+/* Page 0 : I2C access 0x000 - 0x0FF SPI access 0x000 - 0x07F */
+/* Page 1 : SPI access 0x080 - 0x0FF */
+/* Page 2 : I2C access 0x100 - 0x1FF SPI access 0x100 - 0x17F */
+/* Page 3 : SPI access 0x180 - 0x1FF */
+#define DA9063_REG_PAGE_CON 0x00
+
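
Registers above 0xFF live on higher pages, so accesses have to switch pages through DA9063_REG_PAGE_CON first. One plausible way to express this is a regmap indirect range; the sketch below is an assumption about how an I2C access layer could be wired up, not code from this patch:

static const struct regmap_range_cfg example_da9063_range_cfg[] = {
	{
		.range_min	= DA9063_REG_PAGE_CON,
		.range_max	= DA9063_REG_CHIP_VARIANT,
		.selector_reg	= DA9063_REG_PAGE_CON,
		.selector_mask	= 1 << DA9063_I2C_PAGE_SEL_SHIFT,
		.selector_shift	= DA9063_I2C_PAGE_SEL_SHIFT,
		.window_start	= 0,
		.window_len	= 256,	/* one 256-register I2C page */
	},
};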
+/* System Control and Event Registers */
+#define DA9063_REG_STATUS_A 0x01
+#define DA9063_REG_STATUS_B 0x02
+#define DA9063_REG_STATUS_C 0x03
+#define DA9063_REG_STATUS_D 0x04
+#define DA9063_REG_FAULT_LOG 0x05
+#define DA9063_REG_EVENT_A 0x06
+#define DA9063_REG_EVENT_B 0x07
+#define DA9063_REG_EVENT_C 0x08
+#define DA9063_REG_EVENT_D 0x09
+#define DA9063_REG_IRQ_MASK_A 0x0A
+#define DA9063_REG_IRQ_MASK_B 0x0B
+#define DA9063_REG_IRQ_MASK_C 0x0C
+#define DA9063_REG_IRQ_MASK_D 0x0D
+#define DA9063_REG_CONTROL_A 0x0E
+#define DA9063_REG_CONTROL_B 0x0F
+#define DA9063_REG_CONTROL_C 0x10
+#define DA9063_REG_CONTROL_D 0x11
+#define DA9063_REG_CONTROL_E 0x12
+#define DA9063_REG_CONTROL_F 0x13
+#define DA9063_REG_PD_DIS 0x14
+
+/* GPIO Control Registers */
+#define DA9063_REG_GPIO_0_1 0x15
+#define DA9063_REG_GPIO_2_3 0x16
+#define DA9063_REG_GPIO_4_5 0x17
+#define DA9063_REG_GPIO_6_7 0x18
+#define DA9063_REG_GPIO_8_9 0x19
+#define DA9063_REG_GPIO_10_11 0x1A
+#define DA9063_REG_GPIO_12_13 0x1B
+#define DA9063_REG_GPIO_14_15 0x1C
+#define DA9063_REG_GPIO_MODE_0_7 0x1D
+#define DA9063_REG_GPIO_MODE_8_15 0x1E
+#define DA9063_REG_GPIO_SWITCH_CONT 0x1F
+
+/* Regulator Control Registers */
+#define DA9063_REG_BCORE2_CONT 0x20
+#define DA9063_REG_BCORE1_CONT 0x21
+#define DA9063_REG_BPRO_CONT 0x22
+#define DA9063_REG_BMEM_CONT 0x23
+#define DA9063_REG_BIO_CONT 0x24
+#define DA9063_REG_BPERI_CONT 0x25
+#define DA9063_REG_LDO1_CONT 0x26
+#define DA9063_REG_LDO2_CONT 0x27
+#define DA9063_REG_LDO3_CONT 0x28
+#define DA9063_REG_LDO4_CONT 0x29
+#define DA9063_REG_LDO5_CONT 0x2A
+#define DA9063_REG_LDO6_CONT 0x2B
+#define DA9063_REG_LDO7_CONT 0x2C
+#define DA9063_REG_LDO8_CONT 0x2D
+#define DA9063_REG_LDO9_CONT 0x2E
+#define DA9063_REG_LDO10_CONT 0x2F
+#define DA9063_REG_LDO11_CONT 0x30
+#define DA9063_REG_VIB 0x31
+#define DA9063_REG_DVC_1 0x32
+#define DA9063_REG_DVC_2 0x33
+
+/* GP-ADC Control Registers */
+#define DA9063_REG_ADC_MAN 0x34
+#define DA9063_REG_ADC_CONT 0x35
+#define DA9063_REG_VSYS_MON 0x36
+#define DA9063_REG_ADC_RES_L 0x37
+#define DA9063_REG_ADC_RES_H 0x38
+#define DA9063_REG_VSYS_RES 0x39
+#define DA9063_REG_ADCIN1_RES 0x3A
+#define DA9063_REG_ADCIN2_RES 0x3B
+#define DA9063_REG_ADCIN3_RES 0x3C
+#define DA9063_REG_MON1_RES 0x3D
+#define DA9063_REG_MON2_RES 0x3E
+#define DA9063_REG_MON3_RES 0x3F
+
+/* RTC Calendar and Alarm Registers */
+#define DA9063_REG_COUNT_S 0x40
+#define DA9063_REG_COUNT_MI 0x41
+#define DA9063_REG_COUNT_H 0x42
+#define DA9063_REG_COUNT_D 0x43
+#define DA9063_REG_COUNT_MO 0x44
+#define DA9063_REG_COUNT_Y 0x45
+#define DA9063_REG_ALARM_MI 0x46
+#define DA9063_REG_ALARM_H 0x47
+#define DA9063_REG_ALARM_D 0x48
+#define DA9063_REG_ALARM_MO 0x49
+#define DA9063_REG_ALARM_Y 0x4A
+#define DA9063_REG_SECOND_A 0x4B
+#define DA9063_REG_SECOND_B 0x4C
+#define DA9063_REG_SECOND_C 0x4D
+#define DA9063_REG_SECOND_D 0x4E
+
+/* Sequencer Control Registers */
+#define DA9063_REG_SEQ 0x81
+#define DA9063_REG_SEQ_TIMER 0x82
+#define DA9063_REG_ID_2_1 0x83
+#define DA9063_REG_ID_4_3 0x84
+#define DA9063_REG_ID_6_5 0x85
+#define DA9063_REG_ID_8_7 0x86
+#define DA9063_REG_ID_10_9 0x87
+#define DA9063_REG_ID_12_11 0x88
+#define DA9063_REG_ID_14_13 0x89
+#define DA9063_REG_ID_16_15 0x8A
+#define DA9063_REG_ID_18_17 0x8B
+#define DA9063_REG_ID_20_19 0x8C
+#define DA9063_REG_ID_22_21 0x8D
+#define DA9063_REG_ID_24_23 0x8E
+#define DA9063_REG_ID_26_25 0x8F
+#define DA9063_REG_ID_28_27 0x90
+#define DA9063_REG_ID_30_29 0x91
+#define DA9063_REG_ID_32_31 0x92
+#define DA9063_REG_SEQ_A 0x95
+#define DA9063_REG_SEQ_B 0x96
+#define DA9063_REG_WAIT 0x97
+#define DA9063_REG_EN_32K 0x98
+#define DA9063_REG_RESET 0x99
+
+/* Regulator Setting Registers */
+#define DA9063_REG_BUCK_ILIM_A 0x9A
+#define DA9063_REG_BUCK_ILIM_B 0x9B
+#define DA9063_REG_BUCK_ILIM_C 0x9C
+#define DA9063_REG_BCORE2_CFG 0x9D
+#define DA9063_REG_BCORE1_CFG 0x9E
+#define DA9063_REG_BPRO_CFG 0x9F
+#define DA9063_REG_BIO_CFG 0xA0
+#define DA9063_REG_BMEM_CFG 0xA1
+#define DA9063_REG_BPERI_CFG 0xA2
+#define DA9063_REG_VBCORE2_A 0xA3
+#define DA9063_REG_VBCORE1_A 0xA4
+#define DA9063_REG_VBPRO_A 0xA5
+#define DA9063_REG_VBMEM_A 0xA6
+#define DA9063_REG_VBIO_A 0xA7
+#define DA9063_REG_VBPERI_A 0xA8
+#define DA9063_REG_VLDO1_A 0xA9
+#define DA9063_REG_VLDO2_A 0xAA
+#define DA9063_REG_VLDO3_A 0xAB
+#define DA9063_REG_VLDO4_A 0xAC
+#define DA9063_REG_VLDO5_A 0xAD
+#define DA9063_REG_VLDO6_A 0xAE
+#define DA9063_REG_VLDO7_A 0xAF
+#define DA9063_REG_VLDO8_A 0xB0
+#define DA9063_REG_VLDO9_A 0xB1
+#define DA9063_REG_VLDO10_A 0xB2
+#define DA9063_REG_VLDO11_A 0xB3
+#define DA9063_REG_VBCORE2_B 0xB4
+#define DA9063_REG_VBCORE1_B 0xB5
+#define DA9063_REG_VBPRO_B 0xB6
+#define DA9063_REG_VBMEM_B 0xB7
+#define DA9063_REG_VBIO_B 0xB8
+#define DA9063_REG_VBPERI_B 0xB9
+#define DA9063_REG_VLDO1_B 0xBA
+#define DA9063_REG_VLDO2_B 0xBB
+#define DA9063_REG_VLDO3_B 0xBC
+#define DA9063_REG_VLDO4_B 0xBD
+#define DA9063_REG_VLDO5_B 0xBE
+#define DA9063_REG_VLDO6_B 0xBF
+#define DA9063_REG_VLDO7_B 0xC0
+#define DA9063_REG_VLDO8_B 0xC1
+#define DA9063_REG_VLDO9_B 0xC2
+#define DA9063_REG_VLDO10_B 0xC3
+#define DA9063_REG_VLDO11_B 0xC4
+
+/* Backup Battery Charger Control Register */
+#define DA9063_REG_BBAT_CONT 0xC5
+
+/* GPIO PWM (LED) */
+#define DA9063_REG_GPO11_LED 0xC6
+#define DA9063_REG_GPO14_LED 0xC7
+#define DA9063_REG_GPO15_LED 0xC8
+
+/* GP-ADC Threshold Registers */
+#define DA9063_REG_ADC_CFG 0xC9
+#define DA9063_REG_AUTO1_HIGH 0xCA
+#define DA9063_REG_AUTO1_LOW 0xCB
+#define DA9063_REG_AUTO2_HIGH 0xCC
+#define DA9063_REG_AUTO2_LOW 0xCD
+#define DA9063_REG_AUTO3_HIGH 0xCE
+#define DA9063_REG_AUTO3_LOW 0xCF
+
+/* DA9063 Configuration registers */
+/* OTP */
+#define DA9063_REG_OPT_COUNT 0x101
+#define DA9063_REG_OPT_ADDR 0x102
+#define DA9063_REG_OPT_DATA 0x103
+
+/* Customer Trim and Configuration */
+#define DA9063_REG_T_OFFSET 0x104
+#define DA9063_REG_INTERFACE 0x105
+#define DA9063_REG_CONFIG_A 0x106
+#define DA9063_REG_CONFIG_B 0x107
+#define DA9063_REG_CONFIG_C 0x108
+#define DA9063_REG_CONFIG_D 0x109
+#define DA9063_REG_CONFIG_E 0x10A
+#define DA9063_REG_CONFIG_F 0x10B
+#define DA9063_REG_CONFIG_G 0x10C
+#define DA9063_REG_CONFIG_H 0x10D
+#define DA9063_REG_CONFIG_I 0x10E
+#define DA9063_REG_CONFIG_J 0x10F
+#define DA9063_REG_CONFIG_K 0x110
+#define DA9063_REG_CONFIG_L 0x111
+#define DA9063_REG_MON_REG_1 0x112
+#define DA9063_REG_MON_REG_2 0x113
+#define DA9063_REG_MON_REG_3 0x114
+#define DA9063_REG_MON_REG_4 0x115
+#define DA9063_REG_MON_REG_5 0x116
+#define DA9063_REG_MON_REG_6 0x117
+#define DA9063_REG_TRIM_CLDR 0x118
+
+/* General Purpose Registers */
+#define DA9063_REG_GP_ID_0 0x119
+#define DA9063_REG_GP_ID_1 0x11A
+#define DA9063_REG_GP_ID_2 0x11B
+#define DA9063_REG_GP_ID_3 0x11C
+#define DA9063_REG_GP_ID_4 0x11D
+#define DA9063_REG_GP_ID_5 0x11E
+#define DA9063_REG_GP_ID_6 0x11F
+#define DA9063_REG_GP_ID_7 0x120
+#define DA9063_REG_GP_ID_8 0x121
+#define DA9063_REG_GP_ID_9 0x122
+#define DA9063_REG_GP_ID_10 0x123
+#define DA9063_REG_GP_ID_11 0x124
+#define DA9063_REG_GP_ID_12 0x125
+#define DA9063_REG_GP_ID_13 0x126
+#define DA9063_REG_GP_ID_14 0x127
+#define DA9063_REG_GP_ID_15 0x128
+#define DA9063_REG_GP_ID_16 0x129
+#define DA9063_REG_GP_ID_17 0x12A
+#define DA9063_REG_GP_ID_18 0x12B
+#define DA9063_REG_GP_ID_19 0x12C
+
+/* Chip ID and variant */
+#define DA9063_REG_CHIP_ID 0x181
+#define DA9063_REG_CHIP_VARIANT 0x182
+
+/*
+ * PMIC register bits
+ */
+/* DA9063_REG_PAGE_CON (addr=0x00) */
+#define DA9063_PEG_PAGE_SHIFT 0
+#define DA9063_REG_PAGE_MASK 0x07
+#define DA9063_REG_PAGE0 0x00
+#define DA9063_REG_PAGE2 0x02
+#define DA9063_PAGE_WRITE_MODE 0x00
+#define DA9063_REPEAT_WRITE_MODE 0x40
+#define DA9063_PAGE_REVERT 0x80
+
+/* DA9063_REG_STATUS_A (addr=0x01) */
+#define DA9063_NONKEY 0x01
+#define DA9063_WAKE 0x02
+#define DA9063_DVC_BUSY 0x04
+#define DA9063_COMP_1V2 0x08
+
+/* DA9063_REG_STATUS_B (addr=0x02) */
+#define DA9063_GPI0 0x01
+#define DA9063_GPI1 0x02
+#define DA9063_GPI2 0x04
+#define DA9063_GPI3 0x08
+#define DA9063_GPI4 0x10
+#define DA9063_GPI5 0x20
+#define DA9063_GPI6 0x40
+#define DA9063_GPI7 0x80
+
+/* DA9063_REG_STATUS_C (addr=0x03) */
+#define DA9063_GPI8 0x01
+#define DA9063_GPI9 0x02
+#define DA9063_GPI10 0x04
+#define DA9063_GPI11 0x08
+#define DA9063_GPI12 0x10
+#define DA9063_GPI13 0x20
+#define DA9063_GPI14 0x40
+#define DA9063_GPI15 0x80
+
+/* DA9063_REG_STATUS_D (addr=0x04) */
+#define DA9063_LDO3_LIM 0x08
+#define DA9063_LDO4_LIM 0x10
+#define DA9063_LDO7_LIM 0x20
+#define DA9063_LDO8_LIM 0x40
+#define DA9063_LDO11_LIM 0x80
+
+/* DA9063_REG_FAULT_LOG (addr=0x05) */
+#define DA9063_TWD_ERROR 0x01
+#define DA9063_POR 0x02
+#define DA9063_VDD_FAULT 0x04
+#define DA9063_VDD_START 0x08
+#define DA9063_TEMP_CRIT 0x10
+#define DA9063_KEY_RESET 0x20
+#define DA9063_NSHUTDOWN 0x40
+#define DA9063_WAIT_SHUT 0x80
+
+/* DA9063_REG_EVENT_A (addr=0x06) */
+#define DA9063_E_NONKEY 0x01
+#define DA9063_E_ALARM 0x02
+#define DA9063_E_TICK 0x04
+#define DA9063_E_ADC_RDY 0x08
+#define DA9063_E_SEQ_RDY 0x10
+#define DA9063_EVENTS_B 0x20
+#define DA9063_EVENTS_C 0x40
+#define DA9063_EVENTS_D 0x80
+
+/* DA9063_REG_EVENT_B (addr=0x07) */
+#define DA9063_E_WAKE 0x01
+#define DA9063_E_TEMP 0x02
+#define DA9063_E_COMP_1V2 0x04
+#define DA9063_E_LDO_LIM 0x08
+#define DA9063_E_REG_UVOV 0x10
+#define DA9063_E_DVC_RDY 0x20
+#define DA9063_E_VDD_MON 0x40
+#define DA9063_E_VDD_WARN 0x80
+
+/* DA9063_REG_EVENT_C (addr=0x08) */
+#define DA9063_E_GPI0 0x01
+#define DA9063_E_GPI1 0x02
+#define DA9063_E_GPI2 0x04
+#define DA9063_E_GPI3 0x08
+#define DA9063_E_GPI4 0x10
+#define DA9063_E_GPI5 0x20
+#define DA9063_E_GPI6 0x40
+#define DA9063_E_GPI7 0x80
+
+/* DA9063_REG_EVENT_D (addr=0x09) */
+#define DA9063_E_GPI8 0x01
+#define DA9063_E_GPI9 0x02
+#define DA9063_E_GPI10 0x04
+#define DA9063_E_GPI11 0x08
+#define DA9063_E_GPI12 0x10
+#define DA9063_E_GPI13 0x20
+#define DA9063_E_GPI14 0x40
+#define DA9063_E_GPI15 0x80
+
+/* DA9063_REG_IRQ_MASK_A (addr=0x0A) */
+#define DA9063_M_ONKEY 0x01
+#define DA9063_M_ALARM 0x02
+#define DA9063_M_TICK 0x04
+#define DA9063_M_ADC_RDY 0x08
+#define DA9063_M_SEQ_RDY 0x10
+
+/* DA9063_REG_IRQ_MASK_B (addr=0x0B) */
+#define DA9063_M_WAKE 0x01
+#define DA9063_M_TEMP 0x02
+#define DA9063_M_COMP_1V2 0x04
+#define DA9063_M_LDO_LIM 0x08
+#define DA9063_M_UVOV 0x10
+#define DA9063_M_DVC_RDY 0x20
+#define DA9063_M_VDD_MON 0x40
+#define DA9063_M_VDD_WARN 0x80
+
+/* DA9063_REG_IRQ_MASK_C (addr=0x0C) */
+#define DA9063_M_GPI0 0x01
+#define DA9063_M_GPI1 0x02
+#define DA9063_M_GPI2 0x04
+#define DA9063_M_GPI3 0x08
+#define DA9063_M_GPI4 0x10
+#define DA9063_M_GPI5 0x20
+#define DA9063_M_GPI6 0x40
+#define DA9063_M_GPI7 0x80
+
+/* DA9063_REG_IRQ_MASK_D (addr=0x0D) */
+#define DA9063_M_GPI8 0x01
+#define DA9063_M_GPI9 0x02
+#define DA9063_M_GPI10 0x04
+#define DA9063_M_GPI11 0x08
+#define DA9063_M_GPI12 0x10
+#define DA9063_M_GPI13 0x20
+#define DA9063_M_GPI14 0x40
+#define DA9063_M_GPI15 0x80
+
+/* DA9063_REG_CONTROL_A (addr=0x0E) */
+#define DA9063_SYSTEM_EN 0x01
+#define DA9063_POWER_EN 0x02
+#define DA9063_POWER1_EN 0x04
+#define DA9063_STANDBY 0x08
+#define DA9063_M_SYSTEM_EN 0x10
+#define DA9063_M_POWER_EN 0x20
+#define DA9063_M_POWER1_EN 0x40
+#define DA9063_CP_EN 0x80
+
+/* DA9063_REG_CONTROL_B (addr=0x0F) */
+#define DA9063_CHG_SEL 0x01
+#define DA9063_WATCHDOG_PD 0x02
+#define DA9063_NRES_MODE 0x08
+#define DA9063_NONKEY_LOCK 0x10
+
+/* DA9063_REG_CONTROL_C (addr=0x10) */
+#define DA9063_DEBOUNCING_MASK 0x07
+#define DA9063_DEBOUNCING_OFF 0x0
+#define DA9063_DEBOUNCING_0MS1 0x1
+#define DA9063_DEBOUNCING_1MS 0x2
+#define DA9063_DEBOUNCING_10MS24 0x3
+#define DA9063_DEBOUNCING_51MS2 0x4
+#define DA9063_DEBOUNCING_256MS 0x5
+#define DA9063_DEBOUNCING_512MS 0x6
+#define DA9063_DEBOUNCING_1024MS 0x7
+
+#define DA9063_AUTO_BOOT 0x08
+#define DA9063_OTPREAD_EN 0x10
+#define DA9063_SLEW_RATE_MASK 0x60
+#define DA9063_SLEW_RATE_4US 0x00
+#define DA9063_SLEW_RATE_3US 0x20
+#define DA9063_SLEW_RATE_1US 0x40
+#define DA9063_SLEW_RATE_0US5 0x60
+#define DA9063_DEF_SUPPLY 0x80
+
+/* DA9063_REG_CONTROL_D (addr=0x11) */
+#define DA9063_TWDSCALE_MASK 0x07
+#define DA9063_BLINK_FRQ_MASK 0x38
+#define DA9063_BLINK_FRQ_OFF 0x00
+#define DA9063_BLINK_FRQ_1S0 0x08
+#define DA9063_BLINK_FRQ_2S0 0x10
+#define DA9063_BLINK_FRQ_4S0 0x18
+#define DA9063_BLINK_FRQ_0S18 0x20
+#define DA9063_BLINK_FRQ_2S0_VDD 0x28
+#define DA9063_BLINK_FRQ_4S0_VDD 0x30
+#define DA9063_BLINK_FRQ_0S18_VDD 0x38
+
+#define DA9063_BLINK_DUR_MASK 0xC0
+#define DA9063_BLINK_DUR_10MS 0x00
+#define DA9063_BLINK_DUR_20MS 0x40
+#define DA9063_BLINK_DUR_40MS 0x80
+#define DA9063_BLINK_DUR_20MSDBL 0xC0
+
+/* DA9063_REG_CONTROL_E (addr=0x12) */
+#define DA9063_RTC_MODE_PD 0x01
+#define DA9063_RTC_MODE_SD 0x02
+#define DA9063_RTC_EN 0x04
+#define DA9063_ECO_MODE 0x08
+#define DA9063_PM_FB1_PIN 0x10
+#define DA9063_PM_FB2_PIN 0x20
+#define DA9063_PM_FB3_PIN 0x40
+#define DA9063_V_LOCK 0x80
+
+/* DA9063_REG_CONTROL_F (addr=0x13) */
+#define DA9063_WATCHDOG 0x01
+#define DA9063_SHUTDOWN 0x02
+#define DA9063_WAKE_UP 0x04
+
+/* DA9063_REG_PD_DIS (addr=0x14) */
+#define DA9063_GPI_DIS 0x01
+#define DA9063_GPADC_PAUSE 0x02
+#define DA9063_PMIF_DIS 0x04
+#define DA9063_HS2WIRE_DIS 0x08
+#define DA9063_BBAT_DIS 0x20
+#define DA9063_OUT_32K_PAUSE 0x40
+#define DA9063_PMCONT_DIS 0x80
+
+/* DA9063_REG_GPIO_0_1 (addr=0x15) */
+#define DA9063_GPIO0_PIN_MASK 0x03
+#define DA9063_GPIO0_PIN_ADCIN1 0x00
+#define DA9063_GPIO0_PIN_GPI 0x01
+#define DA9063_GPIO0_PIN_GPO_OD 0x02
+#define DA9063_GPIO0_PIN_GPO 0x03
+#define DA9063_GPIO0_TYPE 0x04
+#define DA9063_GPIO0_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO0_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO0_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO0_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO0_NO_WAKEUP 0x08
+#define DA9063_GPIO1_PIN_MASK 0x30
+#define DA9063_GPIO1_PIN_ADCIN2_COMP 0x00
+#define DA9063_GPIO1_PIN_GPI 0x10
+#define DA9063_GPIO1_PIN_GPO_OD 0x20
+#define DA9063_GPIO1_PIN_GPO 0x30
+#define DA9063_GPIO1_TYPE 0x40
+#define DA9063_GPIO1_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO1_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO1_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO1_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO1_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_2_3 (addr=0x16) */
+#define DA9063_GPIO2_PIN_MASK 0x03
+#define DA9063_GPIO2_PIN_ADCIN3 0x00
+#define DA9063_GPIO2_PIN_GPI 0x01
+#define DA9063_GPIO2_PIN_GPO_PSS 0x02
+#define DA9063_GPIO2_PIN_GPO 0x03
+#define DA9063_GPIO2_TYPE 0x04
+#define DA9063_GPIO2_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO2_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO2_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO2_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO2_NO_WAKEUP 0x08
+#define DA9063_GPIO3_PIN_MASK 0x30
+#define DA9063_GPIO3_PIN_CORE_SW_G 0x00
+#define DA9063_GPIO3_PIN_GPI 0x10
+#define DA9063_GPIO3_PIN_GPO_OD 0x20
+#define DA9063_GPIO3_PIN_GPO 0x30
+#define DA9063_GPIO3_TYPE 0x40
+#define DA9063_GPIO3_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO3_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO3_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO3_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO3_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_4_5 (addr=0x17) */
+#define DA9063_GPIO4_PIN_MASK 0x03
+#define DA9063_GPIO4_PIN_CORE_SW_S 0x00
+#define DA9063_GPIO4_PIN_GPI 0x01
+#define DA9063_GPIO4_PIN_GPO_OD 0x02
+#define DA9063_GPIO4_PIN_GPO 0x03
+#define DA9063_GPIO4_TYPE 0x04
+#define DA9063_GPIO4_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO4_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO4_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO4_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO4_NO_WAKEUP 0x08
+#define DA9063_GPIO5_PIN_MASK 0x30
+#define DA9063_GPIO5_PIN_PERI_SW_G 0x00
+#define DA9063_GPIO5_PIN_GPI 0x10
+#define DA9063_GPIO5_PIN_GPO_OD 0x20
+#define DA9063_GPIO5_PIN_GPO 0x30
+#define DA9063_GPIO5_TYPE 0x40
+#define DA9063_GPIO5_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO5_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO5_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO5_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO5_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_6_7 (addr=0x18) */
+#define DA9063_GPIO6_PIN_MASK 0x03
+#define DA9063_GPIO6_PIN_PERI_SW_S 0x00
+#define DA9063_GPIO6_PIN_GPI 0x01
+#define DA9063_GPIO6_PIN_GPO_OD 0x02
+#define DA9063_GPIO6_PIN_GPO 0x03
+#define DA9063_GPIO6_TYPE 0x04
+#define DA9063_GPIO6_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO6_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO6_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO6_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO6_NO_WAKEUP 0x08
+#define DA9063_GPIO7_PIN_MASK 0x30
+#define DA9063_GPIO7_PIN_GPI 0x10
+#define DA9063_GPIO7_PIN_GPO_PSS 0x20
+#define DA9063_GPIO7_PIN_GPO 0x30
+#define DA9063_GPIO7_TYPE 0x40
+#define DA9063_GPIO7_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO7_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO7_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO7_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO7_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_8_9 (addr=0x19) */
+#define DA9063_GPIO8_PIN_MASK 0x03
+#define DA9063_GPIO8_PIN_GPI_SYS_EN 0x00
+#define DA9063_GPIO8_PIN_GPI 0x01
+#define DA9063_GPIO8_PIN_GPO_PSS 0x02
+#define DA9063_GPIO8_PIN_GPO 0x03
+#define DA9063_GPIO8_TYPE 0x04
+#define DA9063_GPIO8_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO8_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO8_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO8_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO8_NO_WAKEUP 0x08
+#define DA9063_GPIO9_PIN_MASK 0x30
+#define DA9063_GPIO9_PIN_GPI_PWR_EN 0x00
+#define DA9063_GPIO9_PIN_GPI 0x10
+#define DA9063_GPIO9_PIN_GPO_PSS 0x20
+#define DA9063_GPIO9_PIN_GPO 0x30
+#define DA9063_GPIO9_TYPE 0x40
+#define DA9063_GPIO9_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO9_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO9_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO9_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO9_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_10_11 (addr=0x1A) */
+#define DA9063_GPIO10_PIN_MASK 0x03
+#define DA9063_GPIO10_PIN_GPI_PWR1_EN 0x00
+#define DA9063_GPIO10_PIN_GPI 0x01
+#define DA9063_GPIO10_PIN_GPO_OD 0x02
+#define DA9063_GPIO10_PIN_GPO 0x03
+#define DA9063_GPIO10_TYPE 0x04
+#define DA9063_GPIO10_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO10_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO10_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO10_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO10_NO_WAKEUP 0x08
+#define DA9063_GPIO11_PIN_MASK 0x30
+#define DA9063_GPIO11_PIN_GPO_OD 0x00
+#define DA9063_GPIO11_PIN_GPI 0x10
+#define DA9063_GPIO11_PIN_GPO_PSS 0x20
+#define DA9063_GPIO11_PIN_GPO 0x30
+#define DA9063_GPIO11_TYPE 0x40
+#define DA9063_GPIO11_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO11_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO11_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO11_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO11_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_12_13 (addr=0x1B) */
+#define DA9063_GPIO12_PIN_MASK 0x03
+#define DA9063_GPIO12_PIN_NVDDFLT_OUT 0x00
+#define DA9063_GPIO12_PIN_GPI 0x01
+#define DA9063_GPIO12_PIN_VSYSMON_OUT 0x02
+#define DA9063_GPIO12_PIN_GPO 0x03
+#define DA9063_GPIO12_TYPE 0x04
+#define DA9063_GPIO12_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO12_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO12_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO12_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO12_NO_WAKEUP 0x08
+#define DA9063_GPIO13_PIN_MASK 0x30
+#define DA9063_GPIO13_PIN_GPFB1_OUT 0x00
+#define DA9063_GPIO13_PIN_GPI 0x10
+#define DA9063_GPIO13_PIN_GPFB1_OUTOD 0x20
+#define DA9063_GPIO13_PIN_GPO 0x30
+#define DA9063_GPIO13_TYPE 0x40
+#define DA9063_GPIO13_TYPE_GPFB1_OUT 0x00
+#define DA9063_GPIO13_TYPE_GPI 0x00
+#define DA9063_GPIO13_TYPE_GPFB1_OUTOD 0x04
+#define DA9063_GPIO13_TYPE_GPO 0x04
+#define DA9063_GPIO13_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_14_15 (addr=0x1C) */
+#define DA9063_GPIO14_PIN_MASK 0x03
+#define DA9063_GPIO14_PIN_GPO_OD 0x00
+#define DA9063_GPIO14_PIN_GPI 0x01
+#define DA9063_GPIO14_PIN_HS2DATA 0x02
+#define DA9063_GPIO14_PIN_GPO 0x03
+#define DA9063_GPIO14_TYPE 0x04
+#define DA9063_GPIO14_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO14_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO14_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO14_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO14_NO_WAKEUP 0x08
+#define DA9063_GPIO15_PIN_MASK 0x30
+#define DA9063_GPIO15_PIN_GPO_OD 0x00
+#define DA9063_GPIO15_PIN_GPI 0x10
+#define DA9063_GPIO15_PIN_GPO 0x30
+#define DA9063_GPIO15_TYPE 0x40
+#define DA9063_GPIO15_TYPE_GPFB1_OUT 0x00
+#define DA9063_GPIO15_TYPE_GPI 0x00
+#define DA9063_GPIO15_TYPE_GPFB1_OUTOD 0x04
+#define DA9063_GPIO15_TYPE_GPO 0x04
+#define DA9063_GPIO15_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_MODE_0_7 (addr=0x1D) */
+#define DA9063_GPIO0_MODE 0x01
+#define DA9063_GPIO1_MODE 0x02
+#define DA9063_GPIO2_MODE 0x04
+#define DA9063_GPIO3_MODE 0x08
+#define DA9063_GPIO4_MODE 0x10
+#define DA9063_GPIO5_MODE 0x20
+#define DA9063_GPIO6_MODE 0x40
+#define DA9063_GPIO7_MODE 0x80
+
+/* DA9063_REG_GPIO_MODE_8_15 (addr=0x1E) */
+#define DA9063_GPIO8_MODE 0x01
+#define DA9063_GPIO9_MODE 0x02
+#define DA9063_GPIO10_MODE 0x04
+#define DA9063_GPIO11_MODE 0x08
+#define DA9063_GPIO11_MODE_LED_ACT_HIGH 0x00
+#define DA9063_GPIO11_MODE_LED_ACT_LOW 0x08
+#define DA9063_GPIO12_MODE 0x10
+#define DA9063_GPIO13_MODE 0x20
+#define DA9063_GPIO14_MODE 0x40
+#define DA9063_GPIO14_MODE_LED_ACT_HIGH 0x00
+#define DA9063_GPIO14_MODE_LED_ACT_LOW 0x40
+#define DA9063_GPIO15_MODE 0x80
+#define DA9063_GPIO15_MODE_LED_ACT_HIGH 0x00
+#define DA9063_GPIO15_MODE_LED_ACT_LOW 0x80
+
+/* DA9063_REG_SWITCH_CONT (addr=0x1F) */
+#define DA9063_CORE_SW_GPI_MASK 0x03
+#define DA9063_CORE_SW_GPI_OFF 0x00
+#define DA9063_CORE_SW_GPI_GPIO1 0x01
+#define DA9063_CORE_SW_GPI_GPIO2 0x02
+#define DA9063_CORE_SW_GPI_GPIO13 0x03
+#define DA9063_PERI_SW_GPI_MASK 0x0C
+#define DA9063_PERI_SW_GPI_OFF 0x00
+#define DA9063_PERI_SW_GPI_GPIO1 0x04
+#define DA9063_PERI_SW_GPI_GPIO2 0x08
+#define DA9063_PERI_SW_GPI_GPIO13 0x0C
+#define DA9063_SWITCH_SR_MASK 0x30
+#define DA9063_SWITCH_SR_1MV 0x00
+#define DA9063_SWITCH_SR_5MV 0x10
+#define DA9063_SWITCH_SR_10MV 0x20
+#define DA9063_SWITCH_SR_50MV 0x30
+#define DA9063_SWITCH_SR_DIS 0x40
+#define DA9063_CP_EN_MODE 0x80
+
+/* DA9063_REGL_Bxxxx_CONT common bits (addr=0x20-0x25) */
+#define DA9063_BUCK_EN 0x01
+#define DA9063_BUCK_GPI_MASK 0x06
+#define DA9063_BUCK_GPI_OFF 0x00
+#define DA9063_BUCK_GPI_GPIO1 0x02
+#define DA9063_BUCK_GPI_GPIO2 0x04
+#define DA9063_BUCK_GPI_GPIO13 0x06
+#define DA9063_BUCK_CONF 0x08
+#define DA9063_VBUCK_GPI_MASK 0x60
+#define DA9063_VBUCK_GPI_OFF 0x00
+#define DA9063_VBUCK_GPI_GPIO1 0x20
+#define DA9063_VBUCK_GPI_GPIO2 0x40
+#define DA9063_VBUCK_GPI_GPIO13 0x60
+
+/* DA9063_REG_BCORE1_CONT specific bits (addr=0x21) */
+#define DA9063_CORE_SW_EN 0x10
+#define DA9063_CORE_SW_CONF 0x80
+
+/* DA9063_REG_BPERI_CONT specific bits (addr=0x25) */
+#define DA9063_PERI_SW_EN 0x10
+#define DA9063_PERI_SW_CONF 0x80
+
+/* DA9063_REG_LDOx_CONT common bits (addr=0x26-0x30) */
+#define DA9063_LDO_EN 0x01
+#define DA9063_LDO_GPI_MASK 0x06
+#define DA9063_LDO_GPI_OFF 0x00
+#define DA9063_LDO_GPI_GPIO1 0x02
+#define DA9063_LDO_GPI_GPIO2 0x04
+#define DA9063_LDO_GPI_GPIO13 0x06
+#define DA9063_LDO_PD_DIS 0x08
+#define DA9063_VLDO_GPI_MASK 0x60
+#define DA9063_VLDO_GPI_OFF 0x00
+#define DA9063_VLDO_GPI_GPIO1 0x20
+#define DA9063_VLDO_GPI_GPIO2 0x40
+#define DA9063_VLDO_GPI_GPIO13 0x60
+#define DA9063_LDO_CONF 0x80
+
+/* DA9063_REG_LDO5_CONT specific bits (addr=0x2A) */
+#define DA9063_VLDO5_SEL 0x10
+
+/* DA9063_REG_LDO6_CONT specific bits (addr=0x2B) */
+#define DA9063_VLDO6_SEL 0x10
+
+/* DA9063_REG_LDO7_CONT specific bits (addr=0x2C) */
+#define DA9063_VLDO7_SEL 0x10
+
+/* DA9063_REG_LDO8_CONT specific bits (addr=0x2D) */
+#define DA9063_VLDO8_SEL 0x10
+
+/* DA9063_REG_LDO9_CONT specific bits (addr=0x2E) */
+#define DA9063_VLDO9_SEL 0x10
+
+/* DA9063_REG_LDO10_CONT specific bits (addr=0x2F) */
+#define DA9063_VLDO10_SEL 0x10
+
+/* DA9063_REG_LDO11_CONT specific bits (addr=0x30) */
+#define DA9063_VLDO11_SEL 0x10
+
+/* DA9063_REG_VIB (addr=0x31) */
+#define DA9063_VIB_SET_MASK 0x3F
+#define DA9063_VIB_SET_OFF 0
+#define DA9063_VIB_SET_MAX 0x3F
+
+/* DA9063_REG_DVC_1 (addr=0x32) */
+#define DA9063_VBCORE1_SEL 0x01
+#define DA9063_VBCORE2_SEL 0x02
+#define DA9063_VBPRO_SEL 0x04
+#define DA9063_VBMEM_SEL 0x08
+#define DA9063_VBPERI_SEL 0x10
+#define DA9063_VLDO1_SEL 0x20
+#define DA9063_VLDO2_SEL 0x40
+#define DA9063_VLDO3_SEL 0x80
+
+/* DA9063_REG_DVC_2 (addr=0x33) */
+#define DA9063_VBIO_SEL 0x01
+#define DA9063_VLDO4_SEL 0x80
+
+/* DA9063_REG_ADC_MAN (addr=0x34) */
+#define DA9063_ADC_MUX_MASK 0x0F
+#define DA9063_ADC_MUX_VSYS 0x00
+#define DA9063_ADC_MUX_ADCIN1 0x01
+#define DA9063_ADC_MUX_ADCIN2 0x02
+#define DA9063_ADC_MUX_ADCIN3 0x03
+#define DA9063_ADC_MUX_T_SENSE 0x04
+#define DA9063_ADC_MUX_VBBAT 0x05
+#define DA9063_ADC_MUX_LDO_G1 0x08
+#define DA9063_ADC_MUX_LDO_G2 0x09
+#define DA9063_ADC_MUX_LDO_G3 0x0A
+#define DA9063_ADC_MAN 0x10
+#define DA9063_ADC_MODE 0x20
+
+/* DA9063_REG_ADC_CONT (addr=0x35) */
+#define DA9063_ADC_AUTO_VSYS_EN 0x01
+#define DA9063_ADC_AUTO_AD1_EN 0x02
+#define DA9063_ADC_AUTO_AD2_EN 0x04
+#define DA9063_ADC_AUTO_AD3_EN 0x08
+#define DA9063_ADC_AD1_ISRC_EN 0x10
+#define DA9063_ADC_AD2_ISRC_EN 0x20
+#define DA9063_ADC_AD3_ISRC_EN 0x40
+#define DA9063_COMP1V2_EN 0x80
+
+/* DA9063_REG_VSYS_MON (addr=0x36) */
+#define DA9063_VSYS_VAL_MASK 0xFF
+#define DA9063_VSYS_VAL_BASE 0x00
+
+/* DA9063_REG_ADC_RES_L (addr=0x37) */
+#define DA9063_ADC_RES_L_BITS 2
+#define DA9063_ADC_RES_L_MASK 0xC0
+
+/* DA9063_REG_ADC_RES_H (addr=0x38) */
+#define DA9063_ADC_RES_M_BITS 8
+#define DA9063_ADC_RES_M_MASK 0xFF
+
+/* DA9063_REG_(xxx_RES/ADC_RES_H) (addr=0x39-0x3F) */
+#define DA9063_ADC_VAL_MASK 0xFF
+
+/* DA9063_REG_COUNT_S (addr=0x40) */
+#define DA9063_RTC_READ 0x80
+#define DA9063_COUNT_SEC_MASK 0x3F
+
+/* DA9063_REG_COUNT_MI (addr=0x41) */
+#define DA9063_COUNT_MIN_MASK 0x3F
+
+/* DA9063_REG_COUNT_H (addr=0x42) */
+#define DA9063_COUNT_HOUR_MASK 0x1F
+
+/* DA9063_REG_COUNT_D (addr=0x43) */
+#define DA9063_COUNT_DAY_MASK 0x1F
+
+/* DA9063_REG_COUNT_MO (addr=0x44) */
+#define DA9063_COUNT_MONTH_MASK 0x0F
+
+/* DA9063_REG_COUNT_Y (addr=0x45) */
+#define DA9063_COUNT_YEAR_MASK 0x3F
+#define DA9063_MONITOR 0x40
+
+/* DA9063_REG_ALARM_MI (addr=0x46) */
+#define DA9063_ALARM_STATUS_ALARM 0x80
+#define DA9063_ALARM_STATUS_TICK 0x40
+#define DA9063_ALARM_MIN_MASK 0x3F
+
+/* DA9063_REG_ALARM_H (addr=0x47) */
+#define DA9063_ALARM_HOUR_MASK 0x1F
+
+/* DA9063_REG_ALARM_D (addr=0x48) */
+#define DA9063_ALARM_DAY_MASK 0x1F
+
+/* DA9063_REG_ALARM_MO (addr=0x49) */
+#define DA9063_TICK_WAKE 0x20
+#define DA9063_TICK_TYPE 0x10
+#define DA9063_TICK_TYPE_SEC 0x00
+#define DA9063_TICK_TYPE_MIN 0x10
+#define DA9063_ALARM_MONTH_MASK 0x0F
+
+/* DA9063_REG_ALARM_Y (addr=0x4A) */
+#define DA9063_TICK_ON 0x80
+#define DA9063_ALARM_ON 0x40
+#define DA9063_ALARM_YEAR_MASK 0x3F
+
+/* DA9063_REG_WAIT (addr=0x97) */
+#define DA9063_REG_WAIT_TIME_MASK 0xF
+#define DA9063_WAIT_TIME_0_US 0x0
+#define DA9063_WAIT_TIME_512_US 0x1
+#define DA9063_WAIT_TIME_1_MS 0x2
+#define DA9063_WAIT_TIME_2_MS 0x3
+#define DA9063_WAIT_TIME_4_1_MS 0x4
+#define DA9063_WAIT_TIME_8_2_MS 0x5
+#define DA9063_WAIT_TIME_16_4_MS 0x6
+#define DA9063_WAIT_TIME_32_8_MS 0x7
+#define DA9063_WAIT_TIME_65_5_MS 0x8
+#define DA9063_WAIT_TIME_128_MS 0x9
+#define DA9063_WAIT_TIME_256_MS 0xA
+#define DA9063_WAIT_TIME_512_MS 0xB
+#define DA9063_WAIT_TIME_1_S 0xC
+#define DA9063_WAIT_TIME_2_1_S 0xD
+
+/* DA9063_REG_EN_32K (addr=0x98) */
+#define DA9063_STABILIZ_TIME_MASK 0x7
+#define DA9063_CRYSTAL 0x08
+#define DA9063_DELAY_MODE 0x10
+#define DA9063_OUT_CLOCK 0x20
+#define DA9063_RTC_CLOCK 0x40
+#define DA9063_OUT_32K_EN 0x80
+
+/* DA9063_REG_CHIP_VARIANT */
+#define DA9063_CHIP_VARIANT_SHIFT 4
+
+/* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */
+#define DA9063_BIO_ILIM_MASK 0x0F
+#define DA9063_BMEM_ILIM_MASK 0xF0
+
+/* DA9063_REG_BUCK_ILIM_B (addr=0x9B) */
+#define DA9063_BPRO_ILIM_MASK 0x0F
+#define DA9063_BPERI_ILIM_MASK 0xF0
+
+/* DA9063_REG_BUCK_ILIM_C (addr=0x9C) */
+#define DA9063_BCORE1_ILIM_MASK 0x0F
+#define DA9063_BCORE2_ILIM_MASK 0xF0
+
+/* DA9063_REG_Bxxxx_CFG common bits (addr=0x9D-0xA2) */
+#define DA9063_BUCK_FB_MASK 0x07
+#define DA9063_BUCK_PD_DIS_SHIFT 5
+#define DA9063_BUCK_MODE_MASK 0xC0
+#define DA9063_BUCK_MODE_MANUAL 0x00
+#define DA9063_BUCK_MODE_SLEEP 0x40
+#define DA9063_BUCK_MODE_SYNC 0x80
+#define DA9063_BUCK_MODE_AUTO 0xC0
+
+/* DA9063_REG_BPRO_CFG (addr=0x9F) */
+#define DA9063_BPRO_VTTR_EN 0x08
+#define DA9063_BPRO_VTT_EN 0x10
+
+/* DA9063_REG_VBxxxx_A/B (addr=0xA3-0xA8, 0xB4-0xB9) */
+#define DA9063_VBUCK_MASK 0x7F
+#define DA9063_VBUCK_BIAS 0
+#define DA9063_BUCK_SL 0x80
+
+/* DA9063_REG_VLDOx_A/B (addr=0xA9-0x3, 0xBA-0xC4) */
+#define DA9063_LDO_SL 0x80
+
+/* DA9063_REG_VLDO1_A/B (addr=0xA9, 0xBA) */
+#define DA9063_VLDO1_MASK 0x3F
+#define DA9063_VLDO1_BIAS 0
+
+/* DA9063_REG_VLDO2_A/B (addr=0xAA, 0xBB) */
+#define DA9063_VLDO2_MASK 0x3F
+#define DA9063_VLDO2_BIAS 0
+
+/* DA9063_REG_VLDO3_A/B (addr=0xAB, 0xBC) */
+#define DA9063_VLDO3_MASK 0x7F
+#define DA9063_VLDO3_BIAS 0
+
+/* DA9063_REG_VLDO4_A/B (addr=0xAC, 0xBD) */
+#define DA9063_VLDO4_MASK 0x7F
+#define DA9063_VLDO4_BIAS 0
+
+/* DA9063_REG_VLDO5_A/B (addr=0xAD, 0xBE) */
+#define DA9063_VLDO5_MASK 0x3F
+#define DA9063_VLDO5_BIAS 2
+
+/* DA9063_REG_VLDO6_A/B (addr=0xAE, 0xBF) */
+#define DA9063_VLDO6_MASK 0x3F
+#define DA9063_VLDO6_BIAS 2
+
+/* DA9063_REG_VLDO7_A/B (addr=0xAF, 0xC0) */
+#define DA9063_VLDO7_MASK 0x3F
+#define DA9063_VLDO7_BIAS 2
+
+/* DA9063_REG_VLDO8_A/B (addr=0xB0, 0xC1) */
+#define DA9063_VLDO8_MASK 0x3F
+#define DA9063_VLDO8_BIAS 2
+
+/* DA9063_REG_VLDO9_A/B (addr=0xB1, 0xC2) */
+#define DA9063_VLDO9_MASK 0x3F
+#define DA9063_VLDO9_BIAS 3
+
+/* DA9063_REG_VLDO10_A/B (addr=0xB2, 0xC3) */
+#define DA9063_VLDO10_MASK 0x3F
+#define DA9063_VLDO10_BIAS 2
+
+/* DA9063_REG_VLDO11_A/B (addr=0xB3, 0xC4) */
+#define DA9063_VLDO11_MASK 0x3F
+#define DA9063_VLDO11_BIAS 2
+
+/* DA9063_REG_GPO11_LED (addr=0xC6) */
+/* DA9063_REG_GPO14_LED (addr=0xC7) */
+/* DA9063_REG_GPO15_LED (addr=0xC8) */
+#define DA9063_GPIO_DIM 0x80
+#define DA9063_GPIO_PWM_MASK 0x7F
+
+/* DA9063_REG_CONFIG_H (addr=0x10D) */
+#define DA9063_PWM_CLK_MASK 0x01
+#define DA9063_PWM_CLK_PWM2MHZ 0x00
+#define DA9063_PWM_CLK_PWM1MHZ 0x01
+#define DA9063_LDO8_MODE_MASK 0x02
+#define DA9063_LDO8_MODE_LDO 0
+#define DA9063_LDO8_MODE_VIBR 0x02
+#define DA9063_MERGE_SENSE_MASK 0x04
+#define DA9063_MERGE_SENSE_GP_FB2 0x00
+#define DA9063_MERGE_SENSE_GPIO4 0x04
+#define DA9063_BCORE_MERGE 0x08
+#define DA9063_BPRO_OD 0x10
+#define DA9063_BCORE2_OD 0x20
+#define DA9063_BCORE1_OD 0x40
+#define DA9063_BUCK_MERGE 0x80
+
+/* DA9063_REG_CONFIG_I (addr=0x10E) */
+#define DA9063_NONKEY_PIN_MASK 0x03
+#define DA9063_NONKEY_PIN_PORT 0x00
+#define DA9063_NONKEY_PIN_SWDOWN 0x01
+#define DA9063_NONKEY_PIN_AUTODOWN 0x02
+#define DA9063_NONKEY_PIN_AUTOFLPRT 0x03
+
+/* DA9063_REG_MON_REG_5 (addr=0x116) */
+#define DA9063_MON_A8_IDX_MASK 0x07
+#define DA9063_MON_A8_IDX_NONE 0x00
+#define DA9063_MON_A8_IDX_BCORE1 0x01
+#define DA9063_MON_A8_IDX_BCORE2 0x02
+#define DA9063_MON_A8_IDX_BPRO 0x03
+#define DA9063_MON_A8_IDX_LDO3 0x04
+#define DA9063_MON_A8_IDX_LDO4 0x05
+#define DA9063_MON_A8_IDX_LDO11 0x06
+#define DA9063_MON_A9_IDX_MASK 0x70
+#define DA9063_MON_A9_IDX_NONE 0x00
+#define DA9063_MON_A9_IDX_BIO 0x01
+#define DA9063_MON_A9_IDX_BMEM 0x02
+#define DA9063_MON_A9_IDX_BPERI 0x03
+#define DA9063_MON_A9_IDX_LDO1 0x04
+#define DA9063_MON_A9_IDX_LDO2 0x05
+#define DA9063_MON_A9_IDX_LDO5 0x06
+
+/* DA9063_REG_MON_REG_6 (addr=0x117) */
+#define DA9063_MON_A10_IDX_MASK 0x07
+#define DA9063_MON_A10_IDX_NONE 0x00
+#define DA9063_MON_A10_IDX_LDO6 0x01
+#define DA9063_MON_A10_IDX_LDO7 0x02
+#define DA9063_MON_A10_IDX_LDO8 0x03
+#define DA9063_MON_A10_IDX_LDO9 0x04
+#define DA9063_MON_A10_IDX_LDO10 0x05
+
+#endif /* _DA9063_REG_H */
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index 13a1ee95a233..5166935ce66d 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -30,6 +30,8 @@
#include <mach/hardware.h>
+struct regmap;
+
/*
* Register values.
*/
@@ -113,6 +115,7 @@ struct davinci_vc {
/* Memory resources */
void __iomem *base;
+ struct regmap *regmap;
/* MFD cells */
struct mfd_cell cells[DAVINCI_VC_CELLS];
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index ca0790fba2f5..060e11256fbc 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -12,6 +12,8 @@
#include <linux/notifier.h>
#include <linux/err.h>
+#include <dt-bindings/mfd/dbx500-prcmu.h> /* For clock identifiers */
+
/* Offset for the firmware version within the TCPM */
#define DB8500_PRCMU_FW_VERSION_OFFSET 0xA4
#define DBX540_PRCMU_FW_VERSION_OFFSET 0xA8
@@ -94,74 +96,6 @@ enum prcmu_wakeup_index {
#define PRCMU_CLKSRC_ARMCLKFIX 0x46
#define PRCMU_CLKSRC_HDMICLK 0x47
-/*
- * Clock identifiers.
- */
-enum prcmu_clock {
- PRCMU_SGACLK,
- PRCMU_UARTCLK,
- PRCMU_MSP02CLK,
- PRCMU_MSP1CLK,
- PRCMU_I2CCLK,
- PRCMU_SDMMCCLK,
- PRCMU_SPARE1CLK,
- PRCMU_SLIMCLK,
- PRCMU_PER1CLK,
- PRCMU_PER2CLK,
- PRCMU_PER3CLK,
- PRCMU_PER5CLK,
- PRCMU_PER6CLK,
- PRCMU_PER7CLK,
- PRCMU_LCDCLK,
- PRCMU_BMLCLK,
- PRCMU_HSITXCLK,
- PRCMU_HSIRXCLK,
- PRCMU_HDMICLK,
- PRCMU_APEATCLK,
- PRCMU_APETRACECLK,
- PRCMU_MCDECLK,
- PRCMU_IPI2CCLK,
- PRCMU_DSIALTCLK,
- PRCMU_DMACLK,
- PRCMU_B2R2CLK,
- PRCMU_TVCLK,
- PRCMU_SSPCLK,
- PRCMU_RNGCLK,
- PRCMU_UICCCLK,
- PRCMU_PWMCLK,
- PRCMU_IRDACLK,
- PRCMU_IRRCCLK,
- PRCMU_SIACLK,
- PRCMU_SVACLK,
- PRCMU_ACLK,
- PRCMU_HVACLK, /* Ux540 only */
- PRCMU_G1CLK, /* Ux540 only */
- PRCMU_SDMMCHCLK,
- PRCMU_CAMCLK,
- PRCMU_BML8580CLK,
- PRCMU_NUM_REG_CLOCKS,
- PRCMU_SYSCLK = PRCMU_NUM_REG_CLOCKS,
- PRCMU_CDCLK,
- PRCMU_TIMCLK,
- PRCMU_PLLSOC0,
- PRCMU_PLLSOC1,
- PRCMU_ARMSS,
- PRCMU_PLLDDR,
- PRCMU_PLLDSI,
- PRCMU_DSI0CLK,
- PRCMU_DSI1CLK,
- PRCMU_DSI0ESCCLK,
- PRCMU_DSI1ESCCLK,
- PRCMU_DSI2ESCCLK,
- /* LCD DSI PLL - Ux540 only */
- PRCMU_PLLDSI_LCD,
- PRCMU_DSI0CLK_LCD,
- PRCMU_DSI1CLK_LCD,
- PRCMU_DSI0ESCCLK_LCD,
- PRCMU_DSI1ESCCLK_LCD,
- PRCMU_DSI2ESCCLK_LCD,
-};
-
/**
* enum prcmu_wdog_id - PRCMU watchdog IDs
* @PRCMU_WDOG_ALL: use all timers
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 244fb0d51589..3e050b933dd0 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -323,7 +323,6 @@ struct max77693_dev {
int irq;
int irq_gpio;
- bool wakeup;
struct mutex irqlock;
int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index 676f0f388992..3f3dc45f93ee 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -64,8 +64,6 @@ struct max77693_muic_platform_data {
};
struct max77693_platform_data {
- int wakeup;
-
/* regulator data */
struct max77693_regulator_data *regulators;
int num_regulators;
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 41ed59276c00..67c17b5a6f44 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -41,6 +41,13 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx,
unsigned int mode, unsigned int channel,
u8 ato, bool atox, unsigned int *sample);
+#define MC13783_AUDIO_RX0 36
+#define MC13783_AUDIO_RX1 37
+#define MC13783_AUDIO_TX 38
+#define MC13783_SSI_NETWORK 39
+#define MC13783_AUDIO_CODEC 40
+#define MC13783_AUDIO_DAC 41
+
#define MC13XXX_IRQ_ADCDONE 0
#define MC13XXX_IRQ_ADCBISDONE 1
#define MC13XXX_IRQ_TS 2
diff --git a/include/linux/mfd/mcp.h b/include/linux/mfd/mcp.h
index a9e8bd157673..f682953043ba 100644
--- a/include/linux/mfd/mcp.h
+++ b/include/linux/mfd/mcp.h
@@ -10,6 +10,8 @@
#ifndef MCP_H
#define MCP_H
+#include <linux/device.h>
+
struct mcp_ops;
struct mcp {
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 37e48c957791..9974e387e483 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -184,6 +184,50 @@ enum palmas_regulators {
PALMAS_NUM_REGS,
};
+/* External control signal names */
+enum {
+ PALMAS_EXT_CONTROL_ENABLE1 = 0x1,
+ PALMAS_EXT_CONTROL_ENABLE2 = 0x2,
+ PALMAS_EXT_CONTROL_NSLEEP = 0x4,
+};
+
+/*
+ * Palmas device resources can be enabled or disabled externally rather
+ * than through register writes over I2C. The IDs below identify the
+ * externally controlled requestor for each resource.
+ */
+enum palmas_external_requestor_id {
+ PALMAS_EXTERNAL_REQSTR_ID_REGEN1,
+ PALMAS_EXTERNAL_REQSTR_ID_REGEN2,
+ PALMAS_EXTERNAL_REQSTR_ID_SYSEN1,
+ PALMAS_EXTERNAL_REQSTR_ID_SYSEN2,
+ PALMAS_EXTERNAL_REQSTR_ID_CLK32KG,
+ PALMAS_EXTERNAL_REQSTR_ID_CLK32KGAUDIO,
+ PALMAS_EXTERNAL_REQSTR_ID_REGEN3,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS12,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS3,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS45,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS6,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS7,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS8,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS9,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS10,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO1,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO2,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO3,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO4,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO5,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO6,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO7,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO8,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO9,
+ PALMAS_EXTERNAL_REQSTR_ID_LDOLN,
+ PALMAS_EXTERNAL_REQSTR_ID_LDOUSB,
+
+ /* Last entry */
+ PALMAS_EXTERNAL_REQSTR_ID_MAX,
+};
+
struct palmas_pmic_platform_data {
/* An array of pointers to regulator init data indexed by regulator
* ID
@@ -259,6 +303,7 @@ struct palmas_platform_data {
*/
int mux_from_pdata;
u8 pad1, pad2;
+ bool pm_off;
struct palmas_pmic_platform_data *pmic_pdata;
struct palmas_gpadc_platform_data *gpadc_pdata;
@@ -2878,4 +2923,9 @@ static inline int palmas_irq_get_virq(struct palmas *palmas, int irq)
return regmap_irq_get_virq(palmas->irq_data, irq);
}
+
+int palmas_ext_control_req_config(struct palmas *palmas,
+ enum palmas_external_requestor_id ext_control_req_id,
+ int ext_ctrl, bool enable);
+
#endif /* __LINUX_MFD_PALMAS_H */
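
The new palmas_ext_control_req_config() prototype ties a resource's requestor ID to one of the PALMAS_EXT_CONTROL_* lines. A hedged call sketch; the resource and control line chosen here are arbitrary examples, and palmas is assumed to be a valid struct palmas pointer:

	int ret;

	/* Gate SMPS6 from the NSLEEP line instead of I2C register writes. */
	ret = palmas_ext_control_req_config(palmas,
					    PALMAS_EXTERNAL_REQSTR_ID_SMPS6,
					    PALMAS_EXT_CONTROL_NSLEEP, true);
	if (ret < 0)
		return ret;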
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/mfd/rtsx_common.h
index 2b13970596f5..443176ee1ab0 100644
--- a/include/linux/mfd/rtsx_common.h
+++ b/include/linux/mfd/rtsx_common.h
@@ -1,6 +1,6 @@
/* Driver for Realtek driver-based card reader
*
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
*
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
- * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#ifndef __RTSX_COMMON_H
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 7a9f7089435d..0ce772105508 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -1,6 +1,6 @@
/* Driver for Realtek PCI-Express card reader
*
- * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,7 +17,6 @@
*
* Author:
* Wei WANG <wei_wang@realsil.com.cn>
- * No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
*/
#ifndef __RTSX_PCI_H
@@ -25,8 +24,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
-
-#include "rtsx_common.h"
+#include <linux/mfd/rtsx_common.h>
#define MAX_RW_REG_CNT 1024
@@ -184,11 +182,26 @@
#define CARD_SHARE_BAROSSA_SD 0x01
#define CARD_SHARE_BAROSSA_MS 0x02
+/* CARD_DRIVE_SEL */
+#define MS_DRIVE_8mA (0x01 << 6)
+#define MMC_DRIVE_8mA (0x01 << 4)
+#define XD_DRIVE_8mA (0x01 << 2)
+#define GPIO_DRIVE_8mA 0x01
+#define RTS5209_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+ XD_DRIVE_8mA | GPIO_DRIVE_8mA)
+#define RTL8411_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+ XD_DRIVE_8mA)
+#define RTSX_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | GPIO_DRIVE_8mA)
+
/* SD30_DRIVE_SEL */
#define DRIVER_TYPE_A 0x05
#define DRIVER_TYPE_B 0x03
#define DRIVER_TYPE_C 0x02
#define DRIVER_TYPE_D 0x01
+#define CFG_DRIVER_TYPE_A 0x02
+#define CFG_DRIVER_TYPE_B 0x03
+#define CFG_DRIVER_TYPE_C 0x01
+#define CFG_DRIVER_TYPE_D 0x00
/* FPDCTL */
#define SSC_POWER_DOWN 0x01
@@ -521,6 +534,10 @@
#define SAMPLE_VAR_CLK0 (0x01 << 4)
#define SAMPLE_VAR_CLK1 (0x02 << 4)
+/* HOST_SLEEP_STATE */
+#define HOST_ENTER_S1 1
+#define HOST_ENTER_S3 2
+
#define MS_CFG 0xFD40
#define MS_TPC 0xFD41
#define MS_TRANS_CFG 0xFD42
@@ -669,6 +686,7 @@
#define PME_FORCE_CTL 0xFE56
#define ASPM_FORCE_CTL 0xFE57
#define PM_CLK_FORCE_CTL 0xFE58
+#define FUNC_FORCE_CTL 0xFE59
#define PERST_GLITCH_WIDTH 0xFE5C
#define CHANGE_LINK_STATE 0xFE5B
#define RESET_LOAD_REG 0xFE5E
@@ -684,6 +702,13 @@
#define DUMMY_REG_RESET_0 0xFE90
+#define AUTOLOAD_CFG_BASE 0xFF00
+
+#define PM_CTRL1 0xFF44
+#define PM_CTRL2 0xFF45
+#define PM_CTRL3 0xFF46
+#define PM_CTRL4 0xFF47
+
/* Memory mapping */
#define SRAM_BASE 0xE600
#define RBUF_BASE 0xF400
@@ -726,6 +751,64 @@
#define PHY_FLD4 0x1E
#define PHY_DUM_REG 0x1F
+#define LCTLR 0x80
+#define PCR_SETTING_REG1 0x724
+#define PCR_SETTING_REG2 0x814
+#define PCR_SETTING_REG3 0x747
+
+/* Phy bits */
+#define PHY_PCR_FORCE_CODE 0xB000
+#define PHY_PCR_OOBS_CALI_50 0x0800
+#define PHY_PCR_OOBS_VCM_08 0x0200
+#define PHY_PCR_OOBS_SEN_90 0x0040
+#define PHY_PCR_RSSI_EN 0x0002
+
+#define PHY_RCR1_ADP_TIME 0x0100
+#define PHY_RCR1_VCO_COARSE 0x001F
+
+#define PHY_RCR2_EMPHASE_EN 0x8000
+#define PHY_RCR2_NADJR 0x4000
+#define PHY_RCR2_CDR_CP_10 0x0400
+#define PHY_RCR2_CDR_SR_2 0x0100
+#define PHY_RCR2_FREQSEL_12 0x0040
+#define PHY_RCR2_CPADJEN 0x0020
+#define PHY_RCR2_CDR_SC_8 0x0008
+#define PHY_RCR2_CALIB_LATE 0x0002
+
+#define PHY_RDR_RXDSEL_1_9 0x4000
+
+#define PHY_TUNE_TUNEREF_1_0 0x4000
+#define PHY_TUNE_VBGSEL_1252 0x0C00
+#define PHY_TUNE_SDBUS_33 0x0200
+#define PHY_TUNE_TUNED18 0x01C0
+#define PHY_TUNE_TUNED12		0x0020
+
+#define PHY_BPCR_IBRXSEL 0x0400
+#define PHY_BPCR_IBTXSEL 0x0100
+#define PHY_BPCR_IB_FILTER 0x0080
+#define PHY_BPCR_CMIRROR_EN 0x0040
+
+#define PHY_REG_REV_RESV 0xE000
+#define PHY_REG_REV_RXIDLE_LATCHED 0x1000
+#define PHY_REG_REV_P1_EN 0x0800
+#define PHY_REG_REV_RXIDLE_EN 0x0400
+#define PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 0x0040
+#define PHY_REG_REV_STOP_CLKRD 0x0020
+#define PHY_REG_REV_RX_PWST 0x0008
+#define PHY_REG_REV_STOP_CLKWR 0x0004
+
+#define PHY_FLD3_TIMER_4 0x7800
+#define PHY_FLD3_TIMER_6 0x00E0
+#define PHY_FLD3_RXDELINK 0x0004
+
+#define PHY_FLD4_FLDEN_SEL 0x4000
+#define PHY_FLD4_REQ_REF 0x2000
+#define PHY_FLD4_RXAMP_OFF 0x1000
+#define PHY_FLD4_REQ_ADDA 0x0800
+#define PHY_FLD4_BER_COUNT 0x00E0
+#define PHY_FLD4_BER_TIMER 0x000A
+#define PHY_FLD4_BER_CHK_EN 0x0001
+
#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
struct rtsx_pcr;
@@ -747,6 +830,8 @@ struct pcr_ops {
u8 voltage);
unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr);
int (*conv_clk_and_div_n)(int clk, int dir);
+ void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
+ void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
@@ -788,7 +873,6 @@ struct rtsx_pcr {
struct completion *finish_me;
unsigned int cur_clock;
- bool ms_pmos;
bool remove_pci;
bool msi_en;
@@ -806,6 +890,19 @@ struct rtsx_pcr {
#define IC_VER_D 3
u8 ic_version;
+ u8 sd30_drive_sel_1v8;
+ u8 sd30_drive_sel_3v3;
+ u8 card_drive_sel;
+#define ASPM_L1_EN 0x02
+ u8 aspm_en;
+
+#define PCR_MS_PMOS (1 << 0)
+#define PCR_REVERSE_SOCKET (1 << 1)
+ u32 flags;
+
+ u32 tx_initial_phase;
+ u32 rx_initial_phase;
+
const u32 *sd_pull_ctl_enable_tbl;
const u32 *sd_pull_ctl_disable_tbl;
const u32 *ms_pull_ctl_enable_tbl;
@@ -822,6 +919,18 @@ struct rtsx_pcr {
#define PCI_VID(pcr) ((pcr)->pci->vendor)
#define PCI_PID(pcr) ((pcr)->pci->device)
+#define SDR104_PHASE(val) ((val) & 0xFF)
+#define SDR50_PHASE(val) (((val) >> 8) & 0xFF)
+#define DDR50_PHASE(val) (((val) >> 16) & 0xFF)
+#define SDR104_TX_PHASE(pcr) SDR104_PHASE((pcr)->tx_initial_phase)
+#define SDR50_TX_PHASE(pcr) SDR50_PHASE((pcr)->tx_initial_phase)
+#define DDR50_TX_PHASE(pcr) DDR50_PHASE((pcr)->tx_initial_phase)
+#define SDR104_RX_PHASE(pcr) SDR104_PHASE((pcr)->rx_initial_phase)
+#define SDR50_RX_PHASE(pcr) SDR50_PHASE((pcr)->rx_initial_phase)
+#define DDR50_RX_PHASE(pcr) DDR50_PHASE((pcr)->rx_initial_phase)
+#define SET_CLOCK_PHASE(sdr104, sdr50, ddr50) \
+ (((ddr50) << 16) | ((sdr50) << 8) | (sdr104))
+
void rtsx_pci_start_run(struct rtsx_pcr *pcr);
int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data);
int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data);
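
tx_initial_phase and rx_initial_phase pack three per-mode clock phases into one u32, and the macros above unpack them again. A short hedged round-trip sketch; the phase values and the pcr pointer are illustrative:

	u8 sdr104_tx, ddr50_tx;

	/* Pack the SDR104, SDR50 and DDR50 TX phases into one word. */
	pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 16);

	/* Each accessor extracts the matching 8-bit field again. */
	sdr104_tx = SDR104_TX_PHASE(pcr);	/* 27 */
	ddr50_tx  = DDR50_TX_PHASE(pcr);	/* 16 */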
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 378ae8a04c6a..cab2dd279076 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -39,7 +39,8 @@ enum sec_device_type {
struct sec_pmic_dev {
struct device *dev;
struct sec_platform_data *pdata;
- struct regmap *regmap;
+ struct regmap *regmap_pmic;
+ struct regmap *regmap_rtc;
struct i2c_client *i2c;
struct i2c_client *rtc;
@@ -51,6 +52,7 @@ struct sec_pmic_dev {
int ono;
int type;
bool wakeup;
+ bool wtsr_smpl;
};
int sec_irq_init(struct sec_pmic_dev *sec_pmic);
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 71597e20cddb..94b7cd6d8891 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -62,6 +62,11 @@ enum sec_rtc_reg {
/* RTC Update Register1 */
#define RTC_UDR_SHIFT 0
#define RTC_UDR_MASK (1 << RTC_UDR_SHIFT)
+#define RTC_TCON_SHIFT 1
+#define RTC_TCON_MASK (1 << RTC_TCON_SHIFT)
+#define RTC_TIME_EN_SHIFT 3
+#define RTC_TIME_EN_MASK (1 << RTC_TIME_EN_SHIFT)
+
/* RTC Hour register */
#define HOUR_PM_SHIFT 6
#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT)
@@ -69,6 +74,12 @@ enum sec_rtc_reg {
#define ALARM_ENABLE_SHIFT 7
#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
+#define SMPL_ENABLE_SHIFT 7
+#define SMPL_ENABLE_MASK (1 << SMPL_ENABLE_SHIFT)
+
+#define WTSR_ENABLE_SHIFT 6
+#define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT)
+
enum {
RTC_SEC = 0,
RTC_MIN,
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index d0d52ea60074..b3ddf98dec37 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -167,11 +167,8 @@ enum s2mps11_regulators {
S2MPS11_BUCK8,
S2MPS11_BUCK9,
S2MPS11_BUCK10,
- S2MPS11_AP_EN32KHZ,
- S2MPS11_CP_EN32KHZ,
- S2MPS11_BT_EN32KHZ,
- S2MPS11_REG_MAX,
+ S2MPS11_REGULATOR_MAX,
};
#define S2MPS11_BUCK_MIN1 600000
@@ -203,6 +200,5 @@ enum s2mps11_regulators {
#define S2MPS11_BUCK4_RAMP_EN_SHIFT 1
#define S2MPS11_BUCK6_RAMP_EN_SHIFT 0
#define S2MPS11_PMIC_EN_SHIFT 6
-#define S2MPS11_REGULATOR_MAX (S2MPS11_REG_MAX - 3)
#endif /* __LINUX_MFD_S2MPS11_H */
diff --git a/include/linux/mfd/stw481x.h b/include/linux/mfd/stw481x.h
new file mode 100644
index 000000000000..eda121556e5d
--- /dev/null
+++ b/include/linux/mfd/stw481x.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef MFD_STW481X_H
+#define MFD_STW481X_H
+
+#include <linux/i2c.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+
+/* These registers are accessed from more than one driver */
+#define STW_CONF1 0x11U
+#define STW_CONF1_PDN_VMMC 0x01U
+#define STW_CONF1_VMMC_MASK 0x0eU
+#define STW_CONF1_VMMC_1_8V 0x02U
+#define STW_CONF1_VMMC_2_85V 0x04U
+#define STW_CONF1_VMMC_3V 0x06U
+#define STW_CONF1_VMMC_1_85V 0x08U
+#define STW_CONF1_VMMC_2_6V 0x0aU
+#define STW_CONF1_VMMC_2_7V 0x0cU
+#define STW_CONF1_VMMC_3_3V 0x0eU
+#define STW_CONF1_MMC_LS_STATUS 0x10U
+#define STW_PCTL_REG_LO 0x1eU
+#define STW_PCTL_REG_HI 0x1fU
+#define STW_CONF1_V_MONITORING 0x20U
+#define STW_CONF1_IT_WARN 0x40U
+#define STW_CONF1_PDN_VAUX 0x80U
+#define STW_CONF2 0x20U
+#define STW_CONF2_MASK_TWARN 0x01U
+#define STW_CONF2_VMMC_EXT 0x02U
+#define STW_CONF2_MASK_IT_WAKE_UP 0x04U
+#define STW_CONF2_GPO1 0x08U
+#define STW_CONF2_GPO2 0x10U
+#define STW_VCORE_SLEEP 0x21U
+
+/**
+ * struct stw481x - state holder for the Stw481x drivers
+ * @lock: mutex to serialize I2C accesses
+ * @client: corresponding I2C client
+ * @vmmc_regulator: regulator device for the VMMC regulator child
+ * @map: regmap handle to access device registers
+ */
+struct stw481x {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct regulator_dev *vmmc_regulator;
+ struct regmap *map;
+};
+
+#endif
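
Child drivers reach the PMIC through the shared regmap held in struct stw481x. A hedged sketch that reads the VMMC voltage selection out of STW_CONF1; the function name is made up and how the child obtains the stw481x pointer is left to the caller:

static int example_stw481x_get_vmmc_sel(struct stw481x *stw481x,
					unsigned int *sel)
{
	unsigned int val;
	int ret;

	ret = regmap_read(stw481x->map, STW_CONF1, &val);
	if (ret)
		return ret;

	*sel = val & STW_CONF1_VMMC_MASK;	/* e.g. STW_CONF1_VMMC_1_8V */
	return 0;
}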
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index b473577f36db..8789fa3c7fd9 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -17,10 +17,35 @@
struct device_node;
+#ifdef CONFIG_MFD_SYSCON
extern struct regmap *syscon_node_to_regmap(struct device_node *np);
extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s);
extern struct regmap *syscon_regmap_lookup_by_phandle(
struct device_node *np,
const char *property);
+#else
+static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle(
+ struct device_node *np,
+ const char *property)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif
+
#endif /* __LINUX_MFD_SYSCON_H__ */
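
With the !CONFIG_MFD_SYSCON stubs in place, consumers can call the lookup helpers unconditionally and only handle the error pointer. A hedged consumer sketch; the DT property name "example,syscon" and the register offset are made up:

static int example_probe(struct platform_device *pdev)
{
	struct regmap *gpr;

	gpr = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
					      "example,syscon");
	if (IS_ERR(gpr))
		return PTR_ERR(gpr);	/* -ENOSYS when syscon is disabled */

	/* Offset, mask and value are illustrative only. */
	return regmap_update_bits(gpr, 0x4, 0x1, 0x1);
}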
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index b6bdcd66c07d..b6d36b38b99c 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -241,6 +241,12 @@
#define IMX6Q_GPR5_L2_CLK_STOP BIT(8)
+#define IMX6Q_GPR8_TX_SWING_LOW (0x7f << 25)
+#define IMX6Q_GPR8_TX_SWING_FULL (0x7f << 18)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB (0x3f << 12)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB (0x3f << 6)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN1 (0x3f << 0)
+
#define IMX6Q_GPR9_TZASC2_BYP BIT(1)
#define IMX6Q_GPR9_TZASC1_BYP BIT(0)
@@ -273,7 +279,9 @@
#define IMX6Q_GPR12_ARMP_AHB_CLK_EN BIT(26)
#define IMX6Q_GPR12_ARMP_ATB_CLK_EN BIT(25)
#define IMX6Q_GPR12_ARMP_APB_CLK_EN BIT(24)
+#define IMX6Q_GPR12_DEVICE_TYPE (0xf << 12)
#define IMX6Q_GPR12_PCIE_CTL_2 BIT(10)
+#define IMX6Q_GPR12_LOS_LEVEL (0x1f << 4)
#define IMX6Q_GPR13_SDMA_STOP_REQ BIT(30)
#define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29)
@@ -363,4 +371,9 @@
#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V (0x1f << 2)
#define IMX6Q_GPR13_SATA_MPLL_CLK_EN BIT(1)
#define IMX6Q_GPR13_SATA_TX_EDGE_RATE BIT(0)
+
+/* For imx6sl iomux gpr register field define */
+#define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17)
+#define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14)
+
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
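The new GPR8/GPR12 PCIe field masks are intended for masked regmap writes on the IOMUXC GPR syscon; a sketch of how a PCIe host driver might apply them, with the GPR12 register offset (0x30) and the field values assumed rather than defined by this header:

static void example_pcie_configure_gpr12(struct regmap *gpr)
{
	/* advertise the core as a root complex (device type 4) */
	regmap_update_bits(gpr, 0x30 /* GPR12 offset, assumed */,
			   IMX6Q_GPR12_DEVICE_TYPE, 4 << 12);
	/* set the loss-of-signal threshold to an example level of 9 */
	regmap_update_bits(gpr, 0x30 /* GPR12 offset, assumed */,
			   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
}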
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index db1791bb997a..d498d98f0c2c 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -46,16 +46,24 @@
/* Step Enable */
#define STEPENB_MASK (0x1FFFF << 0)
#define STEPENB(val) ((val) << 0)
+#define ENB(val) (1 << (val))
+#define STPENB_STEPENB STEPENB(0x1FFFF)
+#define STPENB_STEPENB_TC STEPENB(0x1FFF)
/* IRQ enable */
#define IRQENB_HW_PEN BIT(0)
#define IRQENB_FIFO0THRES BIT(2)
+#define IRQENB_FIFO0OVRRUN BIT(3)
+#define IRQENB_FIFO0UNDRFLW BIT(4)
#define IRQENB_FIFO1THRES BIT(5)
+#define IRQENB_FIFO1OVRRUN BIT(6)
+#define IRQENB_FIFO1UNDRFLW BIT(7)
#define IRQENB_PENUP BIT(9)
/* Step Configuration */
#define STEPCONFIG_MODE_MASK (3 << 0)
#define STEPCONFIG_MODE(val) ((val) << 0)
+#define STEPCONFIG_MODE_SWCNT STEPCONFIG_MODE(1)
#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2)
#define STEPCONFIG_AVG_MASK (7 << 2)
#define STEPCONFIG_AVG(val) ((val) << 2)
@@ -121,18 +129,23 @@
#define SEQ_STATUS BIT(5)
#define ADC_CLK 3000000
-#define MAX_CLK_DIV 7
#define TOTAL_STEPS 16
#define TOTAL_CHANNELS 8
+#define FIFO1_THRESHOLD 19
/*
-* ADC runs at 3MHz, and it takes
-* 15 cycles to latch one data output.
-* Hence the idle time for ADC to
-* process one sample data would be
-* around 5 micro seconds.
-*/
-#define IDLE_TIMEOUT 5 /* microsec */
+ * time in us for processing a single channel, calculated as follows:
+ *
+ * num cycles = open delay + (sample delay + conv time) * averaging
+ *
+ * num cycles: 152 + (1 + 13) * 16 = 376
+ *
+ * clock frequency: 26MHz / 8 = 3.25MHz
+ * clock period: 1 / 3.25MHz = 308ns
+ *
+ * processing time: 376 * 308ns = 116us
+ */
+#define IDLE_TIMEOUT 116 /* microsec */
#define TSCADC_CELLS 2
@@ -147,6 +160,7 @@ struct ti_tscadc_dev {
struct mfd_cell cells[TSCADC_CELLS];
u32 reg_se_cache;
spinlock_t reg_lock;
+ unsigned int clk_div;
/* tsc device */
struct titsc *tsc;
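The replacement IDLE_TIMEOUT comment fixes a single worst-case figure; the same arithmetic can be redone for other delay and averaging settings. A sketch under the assumptions spelled out in that comment (the helper name and parameters are illustrative only):

/* cycles = open delay + (sample delay + conversion time) * averaging,
 * converted to microseconds at the divided ADC clock, rounded up.
 * With 152, 1, 13, 16 and 3.25 MHz this reproduces the 116 us above. */
static inline unsigned int tscadc_sample_time_us(unsigned int open_delay,
						 unsigned int sample_delay,
						 unsigned int conv_time,
						 unsigned int averaging,
						 unsigned int adc_clk_hz)
{
	u64 cycles = open_delay + (sample_delay + conv_time) * averaging;

	return div_u64(cycles * 1000000ULL + adc_clk_hz - 1, adc_clk_hz);
}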
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index ce3511326f80..b22883d60500 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -108,7 +108,6 @@ struct tmio_mmc_data {
unsigned int cd_gpio;
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
- int (*get_cd)(struct platform_device *host);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
/* clock management callbacks */
int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 7e7fbce7a308..81f639bc1ae6 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -185,6 +185,7 @@
#define TWL6040_GPO_MAX 3
+/* TODO: All platform data structs can be removed */

struct twl6040_codec_data {
u16 hs_left_step;
u16 hs_right_step;
@@ -229,7 +230,6 @@ struct twl6040 {
int audpwron;
int power_count;
int rev;
- u8 vibra_ctrl_cache[2];
/* PLL configuration */
int pll;
diff --git a/include/linux/mfd/ucb1x00.h b/include/linux/mfd/ucb1x00.h
index 28af41756360..88f90cbf8e6a 100644
--- a/include/linux/mfd/ucb1x00.h
+++ b/include/linux/mfd/ucb1x00.h
@@ -10,6 +10,7 @@
#ifndef UCB1200_H
#define UCB1200_H
+#include <linux/device.h>
#include <linux/mfd/mcp.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 40854ac0ba3d..eefafa62d304 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -56,8 +56,6 @@ struct irq_domain;
#define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN)
struct wm8994 {
- struct mutex irq_lock;
-
struct wm8994_pdata pdata;
enum wm8994_type type;
@@ -85,16 +83,43 @@ struct wm8994 {
};
/* Device I/O API */
-int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg);
-int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
- unsigned short val);
-int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
- unsigned short mask, unsigned short val);
-int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
- int count, u16 *buf);
-int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
- int count, const u16 *buf);
+static inline int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(wm8994->regmap, reg, &val);
+
+ if (ret < 0)
+ return ret;
+ else
+ return val;
+}
+
+static inline int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
+ unsigned short val)
+{
+ return regmap_write(wm8994->regmap, reg, val);
+}
+
+static inline int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
+ int count, u16 *buf)
+{
+ return regmap_bulk_read(wm8994->regmap, reg, buf, count);
+}
+
+static inline int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
+ int count, const u16 *buf)
+{
+ return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16));
+}
+
+static inline int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
+ unsigned short mask, unsigned short val)
+{
+ return regmap_update_bits(wm8994->regmap, reg, mask, val);
+}
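Because the I/O helpers are now thin inline regmap wrappers, wm8994_reg_read() folds the error code and the register value into a single int return; a caller-side sketch (the register name comes from wm8994/registers.h and the surrounding context is assumed):

	int id = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET);

	if (id < 0)
		dev_err(wm8994->dev, "failed to read chip ID: %d\n", id);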
/* Helper to save on boilerplate */
static inline int wm8994_request_irq(struct wm8994 *wm8994, int irq,
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 8752dbbc6135..2e5b194b9b19 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -17,10 +17,13 @@
#define PHY_ID_KSZ8873MLL 0x000e7237
#define PHY_ID_KSZ9021 0x00221610
+#define PHY_ID_KSZ9021RLRN 0x00221611
#define PHY_ID_KS8737 0x00221720
#define PHY_ID_KSZ8021 0x00221555
#define PHY_ID_KSZ8031 0x00221556
#define PHY_ID_KSZ8041 0x00221510
+/* undocumented */
+#define PHY_ID_KSZ8041RNLI 0x00221537
#define PHY_ID_KSZ8051 0x00221550
/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */
#define PHY_ID_KSZ8001 0x0022161A
@@ -35,4 +38,9 @@
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
+#define MICREL_KSZ9021_EXTREG_CTRL 0xB
+#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
+#define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104
+#define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105
+
#endif /* _MICREL_PHY_H */
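The new KSZ9021 extended-register constants are used by pairing a control write with a data write; a sketch of the usual sequence, with the 0x8000 write-enable bit assumed from existing board code rather than defined here:

static int ksz9021_write_extreg(struct phy_device *phydev, u16 extreg, u16 val)
{
	int ret;

	/* select the extended register and mark the access as a write */
	ret = phy_write(phydev, MICREL_KSZ9021_EXTREG_CTRL, 0x8000 | extreg);
	if (ret < 0)
		return ret;

	return phy_write(phydev, MICREL_KSZ9021_EXTREG_DATA_WRITE, val);
}

/* e.g. ksz9021_write_extreg(phydev, MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW, 0xf0f0); */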
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a405d3dc0f61..f5096b58b20d 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -41,8 +41,6 @@ extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, enum migrate_mode mode, int reason);
-extern int migrate_huge_page(struct page *, new_page_t x,
- unsigned long private, enum migrate_mode mode);
extern int fail_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -55,6 +53,9 @@ extern int migrate_vmas(struct mm_struct *mm,
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
+extern int migrate_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+ struct buffer_head *head, enum migrate_mode mode);
#else
static inline void putback_lru_pages(struct list_head *l) {}
@@ -62,9 +63,6 @@ static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, enum migrate_mode mode, int reason)
{ return -ENOSYS; }
-static inline int migrate_huge_page(struct page *page, new_page_t x,
- unsigned long private, enum migrate_mode mode)
- { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
@@ -92,11 +90,12 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
-extern int migrate_misplaced_page(struct page *page, int node);
-extern int migrate_misplaced_page(struct page *page, int node);
+extern int migrate_misplaced_page(struct page *page,
+ struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
#else
-static inline int migrate_misplaced_page(struct page *page, int node)
+static inline int migrate_misplaced_page(struct page *page,
+ struct vm_area_struct *vma, int node)
{
return -EAGAIN; /* can't migrate now */
}
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 09c2300ddb37..f7eaf2d60083 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -31,6 +31,7 @@
#define I2O_MINOR 166
#define MICROCODE_MINOR 184
#define TUN_MINOR 200
+#define CUSE_MINOR 203
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
#define MPT_MINOR 220
#define MPT2SAS_MINOR 221
@@ -45,6 +46,7 @@
#define MAPPER_CTRL_MINOR 236
#define LOOP_CTRL_MINOR 237
#define VHOST_NET_MINOR 238
+#define UHID_MINOR 239
#define MISC_DYNAMIC_MINOR 255
struct device;
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index cd1fdf75103b..8df61bc5da00 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -154,10 +154,6 @@ enum {
MLX4_CMD_QUERY_IF_STAT = 0X54,
MLX4_CMD_SET_IF_STAT = 0X55,
- /* set port opcode modifiers */
- MLX4_SET_PORT_PRIO2TC = 0x8,
- MLX4_SET_PORT_SCHEDULER = 0x9,
-
/* register/delete flow steering network rules */
MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
MLX4_QP_FLOW_STEERING_DETACH = 0x66,
@@ -182,6 +178,8 @@ enum {
MLX4_SET_PORT_VLAN_TABLE = 0x3,
MLX4_SET_PORT_PRIO_MAP = 0x4,
MLX4_SET_PORT_GID_TABLE = 0x5,
+ MLX4_SET_PORT_PRIO2TC = 0x8,
+ MLX4_SET_PORT_SCHEDULER = 0x9,
};
enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 24ce6bdd540e..7d3a523160ba 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -54,6 +54,7 @@ enum {
MLX4_FLAG_MASTER = 1 << 2,
MLX4_FLAG_SLAVE = 1 << 3,
MLX4_FLAG_SRIOV = 1 << 4,
+ MLX4_FLAG_OLD_REG_MAC = 1 << 6,
};
enum {
@@ -155,7 +156,7 @@ enum {
MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1,
MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2,
MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3,
- MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4,
+ MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN = 1LL << 4,
MLX4_DEV_CAP_FLAG2_TS = 1LL << 5,
MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6,
MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7,
@@ -640,16 +641,28 @@ struct mlx4_counter {
__be64 tx_bytes;
};
+struct mlx4_quotas {
+ int qp;
+ int cq;
+ int srq;
+ int mpt;
+ int mtt;
+ int counter;
+ int xrcd;
+};
+
struct mlx4_dev {
struct pci_dev *pdev;
unsigned long flags;
unsigned long num_slaves;
struct mlx4_caps caps;
struct mlx4_phys_caps phys_caps;
+ struct mlx4_quotas quotas;
struct radix_tree_root qp_table_tree;
u8 rev_id;
char board_id[MLX4_BOARD_ID_LEN];
int num_vfs;
+ int numa_node;
int oper_log_mgm_entry_size;
u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
@@ -771,6 +784,12 @@ static inline int mlx4_is_master(struct mlx4_dev *dev)
return dev->flags & MLX4_FLAG_MASTER;
}
+static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
+{
+ return dev->phys_caps.base_sqpn + 8 +
+ 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev);
+}
+
static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
{
return (qpn < dev->phys_caps.base_sqpn + 8 +
@@ -816,7 +835,7 @@ void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf);
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
@@ -1078,7 +1097,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
u8 *pg, u16 *ratelimit);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
int npages, u64 iova, u32 *lkey, u32 *rkey);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 68029b30c3dc..da78875807fc 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -181,7 +181,7 @@ enum {
MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
MLX5_DEV_CAP_FLAG_DCT = 1LL << 41,
- MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46,
+ MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
};
enum {
@@ -230,6 +230,15 @@ enum {
MLX5_MAX_PAGE_SHIFT = 31
};
+enum {
+ MLX5_ADAPTER_PAGE_SHIFT = 12
+};
+
+enum {
+ MLX5_CAP_OFF_DCT = 41,
+ MLX5_CAP_OFF_CMDIF_CSUM = 46,
+};
+
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -319,9 +328,9 @@ struct mlx5_hca_cap {
u8 rsvd25[42];
__be16 log_uar_page_sz;
u8 rsvd26[28];
- u8 log_msx_atomic_size_qp;
+ u8 log_max_atomic_size_qp;
u8 rsvd27[2];
- u8 log_msx_atomic_size_dc;
+ u8 log_max_atomic_size_dc;
u8 rsvd28[76];
};
@@ -417,7 +426,7 @@ struct mlx5_init_seg {
struct health_buffer health;
__be32 rsvd2[884];
__be32 health_counter;
- __be32 rsvd3[1023];
+ __be32 rsvd3[1019];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 8888381fc150..554548cd3dd4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -82,7 +82,7 @@ enum {
};
enum {
- MLX5_MAX_EQ_NAME = 20
+ MLX5_MAX_EQ_NAME = 32
};
enum {
@@ -483,6 +483,7 @@ struct mlx5_priv {
struct rb_root page_root;
int fw_pages;
int reg_pages;
+ struct list_head free_list;
struct mlx5_core_health health;
@@ -557,9 +558,11 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
struct mlx5_cmd_msg *in;
struct mlx5_cmd_msg *out;
+ void *uout;
+ int uout_size;
mlx5_cmd_cbk_t callback;
void *context;
- int idx;
+ int idx;
struct completion done;
struct mlx5_cmd *cmd;
struct work_struct work;
@@ -570,6 +573,7 @@ struct mlx5_cmd_work_ent {
u8 token;
struct timespec ts1;
struct timespec ts2;
+ u16 op;
};
struct mlx5_pas {
@@ -653,6 +657,9 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
+int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size, mlx5_cmd_cbk_t callback,
+ void *context);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
@@ -676,7 +683,9 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
- struct mlx5_create_mkey_mbox_in *in, int inlen);
+ struct mlx5_create_mkey_mbox_in *in, int inlen,
+ mlx5_cmd_cbk_t callback, void *context,
+ struct mlx5_create_mkey_mbox_out *out);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
struct mlx5_query_mkey_mbox_out *out, int outlen);
@@ -745,10 +754,14 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
return mkey_idx << 8;
}
+static inline u8 mlx5_mkey_variant(u32 mkey)
+{
+ return mkey & 0xff;
+}
+
enum {
MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
- MLX5_PROF_MASK_CMDIF_CSUM = (u64)1 << 1,
- MLX5_PROF_MASK_MR_CACHE = (u64)1 << 2,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
};
enum {
@@ -758,7 +771,6 @@ enum {
struct mlx5_profile {
u64 mask;
u32 log_max_qp;
- int cmdif_csum;
struct {
int size;
int limit;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d2d59b4149d0..1cedd000cf29 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -50,6 +50,10 @@ extern int sysctl_legacy_va_layout;
#include <asm/pgtable.h>
#include <asm/processor.h>
+#ifndef __pa_symbol
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+#endif
+
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
@@ -115,6 +119,12 @@ extern unsigned int kobjsize(const void *objp);
#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+# define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */
+#else
+# define VM_SOFTDIRTY 0
+#endif
+
#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
@@ -170,6 +180,7 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED 0x40 /* second try */
+#define FAULT_FLAG_USER 0x80 /* The fault originated in userspace */
/*
* vm_fault is filled by the pagefault handler and passed to the vma's
@@ -290,12 +301,26 @@ static inline int put_page_testzero(struct page *page)
/*
* Try to grab a ref unless the page has a refcount of zero, return false if
* that is the case.
+ * This can be called when MMU is off so it must not access
+ * any of the virtual mappings.
*/
static inline int get_page_unless_zero(struct page *page)
{
return atomic_inc_not_zero(&page->_count);
}
+/*
+ * Try to drop a ref unless the page has a refcount of one, return false if
+ * that is the case.
+ * This is to make sure that the refcount won't become zero after this drop.
+ * This can be called when MMU is off so it must not access
+ * any of the virtual mappings.
+ */
+static inline int put_page_unless_one(struct page *page)
+{
+ return atomic_add_unless(&page->_count, -1, 1);
+}
+
extern int page_is_ram(unsigned long pfn);
/* Support for virtually mapped pages */
@@ -489,20 +514,6 @@ static inline int compound_order(struct page *page)
return (unsigned long)page[1].lru.prev;
}
-static inline int compound_trans_order(struct page *page)
-{
- int order;
- unsigned long flags;
-
- if (!PageHead(page))
- return 0;
-
- flags = compound_lock_irqsave(page);
- order = compound_order(page);
- compound_unlock_irqrestore(page, flags);
- return order;
-}
-
static inline void set_compound_order(struct page *page, unsigned long order)
{
page[1].lru.prev = (void *)order;
@@ -588,11 +599,11 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
* sets it, so none of the operations on it need to be atomic.
*/
-/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_NID] | ... | FLAGS | */
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
-#define LAST_NID_PGOFF (ZONES_PGOFF - LAST_NID_WIDTH)
+#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
/*
* Define the bit shifts to access each section. For non-existent
@@ -602,7 +613,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
-#define LAST_NID_PGSHIFT (LAST_NID_PGOFF * (LAST_NID_WIDTH != 0))
+#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -624,7 +635,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_NID_MASK ((1UL << LAST_NID_WIDTH) - 1)
+#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_WIDTH) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
static inline enum zone_type page_zonenum(const struct page *page)
@@ -637,12 +648,12 @@ static inline enum zone_type page_zonenum(const struct page *page)
#endif
/*
- * The identification function is only used by the buddy allocator for
- * determining if two pages could be buddies. We are not really
- * identifying a zone since we could be using a the section number
- * id if we have not node id available in page flags.
- * We guarantee only that it will return the same value for two
- * combinable pages in a zone.
+ * The identification function is mainly used by the buddy allocator for
+ * determining if two pages could be buddies. We are not really identifying
+ * the zone since we could be using the section number id if we do not have
+ * node id available in page flags.
+ * We only guarantee that it will return the same value for two combinable
+ * pages in a zone.
*/
static inline int page_zone_id(struct page *page)
{
@@ -668,51 +679,117 @@ static inline int page_to_nid(const struct page *page)
#endif
#ifdef CONFIG_NUMA_BALANCING
-#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
-static inline int page_nid_xchg_last(struct page *page, int nid)
+static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
- return xchg(&page->_last_nid, nid);
+ return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}
-static inline int page_nid_last(struct page *page)
+static inline int cpupid_to_pid(int cpupid)
{
- return page->_last_nid;
+ return cpupid & LAST__PID_MASK;
}
-static inline void page_nid_reset_last(struct page *page)
+
+static inline int cpupid_to_cpu(int cpupid)
{
- page->_last_nid = -1;
+ return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}
-#else
-static inline int page_nid_last(struct page *page)
+
+static inline int cpupid_to_nid(int cpupid)
{
- return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;
+ return cpu_to_node(cpupid_to_cpu(cpupid));
}
-extern int page_nid_xchg_last(struct page *page, int nid);
+static inline bool cpupid_pid_unset(int cpupid)
+{
+ return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
+}
+
+static inline bool cpupid_cpu_unset(int cpupid)
+{
+ return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
+}
+
+static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
+{
+ return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
+}
-static inline void page_nid_reset_last(struct page *page)
+#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
- int nid = (1 << LAST_NID_SHIFT) - 1;
+ return xchg(&page->_last_cpupid, cpupid);
+}
- page->flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
- page->flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
+static inline int page_cpupid_last(struct page *page)
+{
+ return page->_last_cpupid;
+}
+static inline void page_cpupid_reset_last(struct page *page)
+{
+ page->_last_cpupid = -1;
}
-#endif /* LAST_NID_NOT_IN_PAGE_FLAGS */
#else
-static inline int page_nid_xchg_last(struct page *page, int nid)
+static inline int page_cpupid_last(struct page *page)
{
- return page_to_nid(page);
+ return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}
-static inline int page_nid_last(struct page *page)
+extern int page_cpupid_xchg_last(struct page *page, int cpupid);
+
+static inline void page_cpupid_reset_last(struct page *page)
{
- return page_to_nid(page);
+ int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
+
+ page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
+ page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
+}
+#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+#else /* !CONFIG_NUMA_BALANCING */
+static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+{
+ return page_to_nid(page); /* XXX */
}
-static inline void page_nid_reset_last(struct page *page)
+static inline int page_cpupid_last(struct page *page)
{
+ return page_to_nid(page); /* XXX */
}
-#endif
+
+static inline int cpupid_to_nid(int cpupid)
+{
+ return -1;
+}
+
+static inline int cpupid_to_pid(int cpupid)
+{
+ return -1;
+}
+
+static inline int cpupid_to_cpu(int cpupid)
+{
+ return -1;
+}
+
+static inline int cpu_pid_to_cpupid(int nid, int pid)
+{
+ return -1;
+}
+
+static inline bool cpupid_pid_unset(int cpupid)
+{
+ return 1;
+}
+
+static inline void page_cpupid_reset_last(struct page *page)
+{
+}
+
+static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
+{
+ return false;
+}
+#endif /* CONFIG_NUMA_BALANCING */
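Taken together, the helpers pack a truncated cpu and pid into one int and unpack them again; a small round-trip sketch for the CONFIG_NUMA_BALANCING case (the WARN_ON checks are only illustrative, and smp.h/sched.h are assumed to be available):

	int cpupid = cpu_pid_to_cpupid(raw_smp_processor_id(), current->pid);

	/* only the low LAST__CPU/PID bits survive the packing */
	WARN_ON(cpupid_to_cpu(cpupid) !=
		(raw_smp_processor_id() & LAST__CPU_MASK));
	WARN_ON(cpupid_to_pid(cpupid) != (current->pid & LAST__PID_MASK));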
static inline struct zone *page_zone(const struct page *page)
{
@@ -884,11 +961,12 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
+#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
- VM_FAULT_HWPOISON_LARGE)
+ VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
@@ -992,7 +1070,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
unmap_mapping_range(mapping, holebegin, holelen, 0);
}
-extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
+extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
@@ -1238,32 +1316,76 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
-#if USE_SPLIT_PTLOCKS
-/*
- * We tuck a spinlock to guard each pagetable page into its struct page,
- * at page->private, with BUILD_BUG_ON to make sure that this will not
- * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
- * When freeing, reset page->mapping so free_pages_check won't complain.
- */
-#define __pte_lockptr(page) &((page)->ptl)
-#define pte_lock_init(_page) do { \
- spin_lock_init(__pte_lockptr(_page)); \
-} while (0)
-#define pte_lock_deinit(page) ((page)->mapping = NULL)
-#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else /* !USE_SPLIT_PTLOCKS */
+#if USE_SPLIT_PTE_PTLOCKS
+#if BLOATED_SPINLOCKS
+extern bool ptlock_alloc(struct page *page);
+extern void ptlock_free(struct page *page);
+
+static inline spinlock_t *ptlock_ptr(struct page *page)
+{
+ return page->ptl;
+}
+#else /* BLOATED_SPINLOCKS */
+static inline bool ptlock_alloc(struct page *page)
+{
+ return true;
+}
+
+static inline void ptlock_free(struct page *page)
+{
+}
+
+static inline spinlock_t *ptlock_ptr(struct page *page)
+{
+ return &page->ptl;
+}
+#endif /* BLOATED_SPINLOCKS */
+
+static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+ return ptlock_ptr(pmd_page(*pmd));
+}
+
+static inline bool ptlock_init(struct page *page)
+{
+ /*
+	 * prep_new_page() initializes page->private (and therefore page->ptl)
+	 * with 0. Make sure nobody has taken it into use in the meantime.
+	 *
+	 * That can happen if an arch tries to use slab for page table allocation:
+ * slab code uses page->slab_cache and page->first_page (for tail
+ * pages), which share storage with page->ptl.
+ */
+ VM_BUG_ON(*(unsigned long *)&page->ptl);
+ if (!ptlock_alloc(page))
+ return false;
+ spin_lock_init(ptlock_ptr(page));
+ return true;
+}
+
+/* Reset page->mapping so free_pages_check won't complain. */
+static inline void pte_lock_deinit(struct page *page)
+{
+ page->mapping = NULL;
+ ptlock_free(page);
+}
+
+#else /* !USE_SPLIT_PTE_PTLOCKS */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
-#define pte_lock_init(page) do {} while (0)
-#define pte_lock_deinit(page) do {} while (0)
-#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* USE_SPLIT_PTLOCKS */
+static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+ return &mm->page_table_lock;
+}
+static inline bool ptlock_init(struct page *page) { return true; }
+static inline void pte_lock_deinit(struct page *page) {}
+#endif /* USE_SPLIT_PTE_PTLOCKS */
-static inline void pgtable_page_ctor(struct page *page)
+static inline bool pgtable_page_ctor(struct page *page)
{
- pte_lock_init(page);
inc_zone_page_state(page, NR_PAGETABLE);
+ return ptlock_init(page);
}
static inline void pgtable_page_dtor(struct page *page)
@@ -1300,6 +1422,52 @@ static inline void pgtable_page_dtor(struct page *page)
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
NULL: pte_offset_kernel(pmd, address))
+#if USE_SPLIT_PMD_PTLOCKS
+
+static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+ return ptlock_ptr(virt_to_page(pmd));
+}
+
+static inline bool pgtable_pmd_page_ctor(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ page->pmd_huge_pte = NULL;
+#endif
+ return ptlock_init(page);
+}
+
+static inline void pgtable_pmd_page_dtor(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ VM_BUG_ON(page->pmd_huge_pte);
+#endif
+ ptlock_free(page);
+}
+
+#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)
+
+#else
+
+static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+ return &mm->page_table_lock;
+}
+
+static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
+static inline void pgtable_pmd_page_dtor(struct page *page) {}
+
+#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
+
+#endif
+
+static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
+{
+ spinlock_t *ptl = pmd_lockptr(mm, pmd);
+ spin_lock(ptl);
+ return ptl;
+}
+
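pmd_lock() mirrors the existing pte-lock pattern: the caller receives whichever spinlock protects that pmd (the split per-page lock or the mm-wide one) and releases it explicitly; a usage sketch:

	spinlock_t *ptl = pmd_lock(mm, pmd);

	/* ... inspect or modify the pmd entry under the lock ... */
	spin_unlock(ptl);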
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 1397ccf81e91..cf55945c83fb 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -2,6 +2,7 @@
#define LINUX_MM_INLINE_H
#include <linux/huge_mm.h>
+#include <linux/swap.h>
/**
* page_is_file_cache - should the page be on a file LRU or anon LRU?
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index faf4b7c1ad12..bd299418a934 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -23,7 +23,9 @@
struct address_space;
-#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
+ IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
/*
* Each physical page in the system has a struct page associated with
@@ -42,18 +44,22 @@ struct page {
/* First double word block */
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
- struct address_space *mapping; /* If low bit clear, points to
- * inode address_space, or NULL.
- * If page mapped as anonymous
- * memory, low bit is set, and
- * it points to anon_vma object:
- * see PAGE_MAPPING_ANON below.
- */
+ union {
+ struct address_space *mapping; /* If low bit clear, points to
+ * inode address_space, or NULL.
+ * If page mapped as anonymous
+ * memory, low bit is set, and
+ * it points to anon_vma object:
+ * see PAGE_MAPPING_ANON below.
+ */
+ void *s_mem; /* slab first object */
+ };
+
/* Second double word */
struct {
union {
pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* slub/slob first free object */
+ void *freelist; /* sl[aou]b first free object */
bool pfmemalloc; /* If set by the page allocator,
* ALLOC_NO_WATERMARKS was set
* and the low watermark was not
@@ -109,6 +115,7 @@ struct page {
};
atomic_t _count; /* Usage count, see below. */
};
+ unsigned int active; /* SLAB */
};
};
@@ -130,6 +137,12 @@ struct page {
struct list_head list; /* slobs list of pages */
struct slab *slab_page; /* slab fields */
+ struct rcu_head rcu_head; /* Used by SLAB
+ * when destroying via RCU
+ */
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+#endif
};
/* Remainder is not double word aligned */
@@ -141,9 +154,13 @@ struct page {
* indicates order in the buddy
* system if PG_buddy is set.
*/
-#if USE_SPLIT_PTLOCKS
+#if USE_SPLIT_PTE_PTLOCKS
+#if BLOATED_SPINLOCKS
+ spinlock_t *ptl;
+#else
spinlock_t ptl;
#endif
+#endif
struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
struct page *first_page; /* Compound tail pages */
};
@@ -174,8 +191,8 @@ struct page {
void *shadow;
#endif
-#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
- int _last_nid;
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+ int _last_cpupid;
#endif
}
/*
@@ -309,19 +326,20 @@ enum {
NR_MM_COUNTERS
};
-#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
+#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information, */
struct task_rss_stat {
int events; /* for synchronization threshold */
int count[NR_MM_COUNTERS];
};
-#endif /* USE_SPLIT_PTLOCKS */
+#endif /* USE_SPLIT_PTE_PTLOCKS */
struct mm_rss_stat {
atomic_long_t count[NR_MM_COUNTERS];
};
+struct kioctx_table;
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;
@@ -338,6 +356,7 @@ struct mm_struct {
pgd_t * pgd;
atomic_t mm_users; /* How many users with user space? */
atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
+ atomic_long_t nr_ptes; /* Page table pages */
int map_count; /* number of VMAs */
spinlock_t page_table_lock; /* Protects page tables and some counters */
@@ -359,7 +378,6 @@ struct mm_struct {
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */
unsigned long stack_vm; /* VM_GROWSUP/DOWN */
unsigned long def_flags;
- unsigned long nr_ptes; /* Page table pages */
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
@@ -383,8 +401,8 @@ struct mm_struct {
struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
- spinlock_t ioctx_lock;
- struct hlist_head ioctx_list;
+ spinlock_t ioctx_lock;
+ struct kioctx_table __rcu *ioctx_table;
#endif
#ifdef CONFIG_MM_OWNER
/*
@@ -405,7 +423,7 @@ struct mm_struct {
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_mm *mmu_notifier_mm;
#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -419,28 +437,15 @@ struct mm_struct {
*/
unsigned long numa_next_scan;
- /* numa_next_reset is when the PTE scanner period will be reset */
- unsigned long numa_next_reset;
-
/* Restart point for scanning and setting pte_numa */
unsigned long numa_scan_offset;
/* numa_scan_seq prevents two threads setting pte_numa */
int numa_scan_seq;
-
- /*
- * The first node a task was scheduled on. If a task runs on
- * a different node than Make PTE Scan Go Now.
- */
- int first_nid;
#endif
struct uprobes_state uprobes_state;
};
-/* first nid will either be a valid NID or one of these values */
-#define NUMA_PTE_SCAN_INIT -1
-#define NUMA_PTE_SCAN_ACTIVE -2
-
static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 92dc257251e4..7f7f8dae4b1d 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -87,4 +87,6 @@ calc_vm_flag_bits(unsigned long flags)
_calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
}
+
+unsigned long vm_commit_limit(void);
#endif /* _LINUX_MMAN_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 842de3e21e70..176fdf824b14 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -240,6 +240,7 @@ struct mmc_part {
struct mmc_card {
struct mmc_host *host; /* the host this device belongs to */
struct device dev; /* the device */
+ u32 ocr; /* the current OCR setting */
unsigned int rca; /* relative card address of device */
unsigned int type; /* card type */
#define MMC_TYPE_MMC 0 /* MMC card */
@@ -257,6 +258,7 @@ struct mmc_card {
#define MMC_CARD_REMOVED (1<<7) /* card has been removed */
#define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */
#define MMC_STATE_DOING_BKOPS (1<<10) /* card is doing BKOPS */
+#define MMC_STATE_SUSPENDED (1<<11) /* card is suspended */
unsigned int quirks; /* card quirks */
#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
@@ -420,10 +422,10 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
#define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR)
#define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
-#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED)
#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS)
+#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
@@ -432,11 +434,12 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
#define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR)
#define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
-#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED)
#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
#define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS)
#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS)
+#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
+#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
/*
* Quirk add/remove for MMC products.
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 443243b241d5..87079fc38011 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -151,7 +151,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
struct mmc_command *, int);
extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
-extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool);
+extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool,
+ bool);
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
@@ -188,7 +189,6 @@ extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
extern void mmc_release_host(struct mmc_host *host);
-extern int mmc_try_claim_host(struct mmc_host *host);
extern void mmc_get_card(struct mmc_card *card);
extern void mmc_put_card(struct mmc_card *card);
@@ -208,6 +208,8 @@ static inline void mmc_claim_host(struct mmc_host *host)
__mmc_claim_host(host, NULL);
}
+struct device_node;
extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
+extern int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
#endif /* LINUX_MMC_CORE_H */
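The new mmc_of_parse_voltage() helper converts a device-tree "voltage-ranges" property into an OCR bitmask; a hedged host-driver sketch (the return convention, positive on success, is assumed here):

	u32 ocr_mask = 0;

	if (mmc_of_parse_voltage(np, &ocr_mask) > 0)
		host->ocr_avail = ocr_mask;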
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 198f0fa44e9f..6ce7d2cd3c7a 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -15,6 +15,7 @@
#define LINUX_MMC_DW_MMC_H
#include <linux/scatterlist.h>
+#include <linux/mmc/core.h>
#define MAX_MCI_SLOTS 2
@@ -129,6 +130,9 @@ struct dw_mci {
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
+ struct mmc_command stop_abort;
+ unsigned int prev_blksz;
+ unsigned char timing;
struct workqueue_struct *card_workqueue;
/* DMA interface members*/
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 3b0c33ae13e1..99f5709ac343 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -254,6 +254,7 @@ struct mmc_host {
#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
+#define MMC_CAP_RUNTIME_RESUME (1 << 20) /* Resume at runtime_resume. */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
@@ -309,7 +310,6 @@ struct mmc_host {
spinlock_t lock; /* lock for claim and bus ops */
struct mmc_ios ios; /* current io bus settings */
- u32 ocr; /* the current OCR setting */
/* group bitfields together to minimize padding */
unsigned int use_spi_crc:1;
@@ -382,9 +382,6 @@ static inline void *mmc_priv(struct mmc_host *host)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (dev_name(&(x)->class_dev))
-int mmc_suspend_host(struct mmc_host *);
-int mmc_resume_host(struct mmc_host *);
-
int mmc_power_save_host(struct mmc_host *host);
int mmc_power_restore_host(struct mmc_host *host);
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index e3c6a74d980a..3e781b8c0be7 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -171,6 +171,7 @@ struct sdhci_host {
unsigned int ocr_avail_sdio; /* OCR bit masks */
unsigned int ocr_avail_sd;
unsigned int ocr_avail_mmc;
+ u32 ocr_mask; /* available voltages */
wait_queue_head_t buf_ready_int; /* Waitqueue for Buffer Read Ready interrupt */
unsigned int tuning_done; /* Condition flag set when CMD19 succeeds */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index e7d5dd67bb74..ccd8fb2cad52 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/sh_dma.h>
/*
* MMCIF : CE_CLK_CTRL [19:16]
@@ -33,12 +32,12 @@
*/
struct sh_mmcif_plat_data {
- void (*set_pwr)(struct platform_device *pdev, int state);
- void (*down_pwr)(struct platform_device *pdev);
int (*get_cd)(struct platform_device *pdef);
unsigned int slave_id_tx; /* embedded slave_id_[tr]x */
unsigned int slave_id_rx;
bool use_cd_gpio : 1;
+ bool ccs_unsupported : 1;
+ bool clk_ctrl2_present : 1;
unsigned int cd_gpio;
u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */
unsigned long caps;
@@ -62,6 +61,7 @@ struct sh_mmcif_plat_data {
#define MMCIF_CE_INT_MASK 0x00000044
#define MMCIF_CE_HOST_STS1 0x00000048
#define MMCIF_CE_HOST_STS2 0x0000004C
+#define MMCIF_CE_CLK_CTRL2 0x00000070
#define MMCIF_CE_VERSION 0x0000007C
/* CE_BUF_ACC */
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index b76bcf0621f6..68927ae50845 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -25,8 +25,6 @@ struct sh_mobile_sdhi_info {
unsigned long tmio_caps2;
u32 tmio_ocr_mask; /* available MMC voltages */
unsigned int cd_gpio;
- void (*set_pwr)(struct platform_device *pdev, int state);
- int (*get_cd)(struct platform_device *pdev);
/* callbacks for board specific setup code */
int (*init)(struct platform_device *pdev,
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 7d88d27bfafa..b0c73e4cacea 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -18,7 +18,8 @@ int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio);
void mmc_gpio_free_ro(struct mmc_host *host);
int mmc_gpio_get_cd(struct mmc_host *host);
-int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio);
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
+ unsigned int debounce);
void mmc_gpio_free_cd(struct mmc_host *host);
#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index af4a3b77a8de..bd791e452ad7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -105,6 +105,7 @@ struct zone_padding {
enum zone_stat_item {
/* First 128 byte cacheline (assuming 64 bit words) */
NR_FREE_PAGES,
+ NR_ALLOC_BATCH,
NR_LRU_BASE,
NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
NR_ACTIVE_ANON, /* " " " " " */
@@ -352,7 +353,6 @@ struct zone {
* free areas of different sizes
*/
spinlock_t lock;
- int all_unreclaimable; /* All pages pinned */
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
/* Set to true when the PG_migrate_skip bits should be cleared */
bool compact_blockskip_flush;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 329aa307cb77..45e921401b06 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -277,7 +277,7 @@ struct pcmcia_device_id {
#define INPUT_DEVICE_ID_KEY_MIN_INTERESTING 0x71
#define INPUT_DEVICE_ID_KEY_MAX 0x2ff
#define INPUT_DEVICE_ID_REL_MAX 0x0f
-#define INPUT_DEVICE_ID_ABS_MAX 0x4f
+#define INPUT_DEVICE_ID_ABS_MAX 0x3f
#define INPUT_DEVICE_ID_MSC_MAX 0x07
#define INPUT_DEVICE_ID_LED_MAX 0x0f
#define INPUT_DEVICE_ID_SND_MAX 0x07
diff --git a/include/linux/module.h b/include/linux/module.h
index 05f2447f8c15..15cd6b1b211e 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -367,9 +367,6 @@ struct module
/* What modules do I depend on? */
struct list_head target_list;
- /* Who is waiting for us to be unloaded */
- struct task_struct *waiter;
-
/* Destruction function. */
void (*exit)(void);
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 73005f9957ea..371d346fa270 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -48,6 +48,9 @@ struct mnt_namespace;
#define MNT_INTERNAL 0x4000
#define MNT_LOCK_READONLY 0x400000
+#define MNT_LOCKED 0x800000
+#define MNT_DOOMED 0x1000000
+#define MNT_SYNC_UMOUNT 0x2000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
diff --git a/include/linux/msg.h b/include/linux/msg.h
index 391af8d11cce..e21f9d44307f 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -6,9 +6,9 @@
/* one msg_msg structure for each message */
struct msg_msg {
- struct list_head m_list;
- long m_type;
- int m_ts; /* message text size */
+ struct list_head m_list;
+ long m_type;
+ size_t m_ts; /* message text size */
struct msg_msgseg* next;
void *security;
/* the actual message follows immediately */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index ee66f3a12fb6..009b02481436 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -26,11 +26,11 @@ struct msi_desc {
struct {
__u8 is_msix : 1;
__u8 multiple: 3; /* log2 number of messages */
- __u8 maskbit : 1; /* mask-pending bit supported ? */
- __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
- __u8 pos; /* Location of the msi capability */
- __u16 entry_nr; /* specific enabled entry */
- unsigned default_irq; /* default pre-assigned irq */
+ __u8 maskbit : 1; /* mask-pending bit supported ? */
+ __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
+ __u8 pos; /* Location of the msi capability */
+ __u16 entry_nr; /* specific enabled entry */
+ unsigned default_irq; /* default pre-assigned irq */
} msi_attrib;
u32 masked; /* mask bits */
@@ -51,12 +51,33 @@ struct msi_desc {
};
/*
- * The arch hook for setup up msi irqs
+ * The arch hooks to set up MSI IRQs. These functions are
+ * implemented as weak symbols so that they /can/ be overridden by
+ * architecture-specific code if needed.
*/
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
+void arch_restore_msi_irqs(struct pci_dev *dev, int irq);
+
+void default_teardown_msi_irqs(struct pci_dev *dev);
+void default_restore_msi_irqs(struct pci_dev *dev, int irq);
+u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
+u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag);
+
+struct msi_chip {
+ struct module *owner;
+ struct device *dev;
+ struct device_node *of_node;
+ struct list_head list;
+
+ int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev,
+ struct msi_desc *desc);
+ void (*teardown_irq)(struct msi_chip *chip, unsigned int irq);
+ int (*check_device)(struct msi_chip *chip, struct pci_dev *dev,
+ int nvec, int type);
+};
#endif /* LINUX_MSI_H */
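struct msi_chip lets a bus or interrupt-controller driver supply per-controller MSI plumbing instead of relying on the global weak hooks; a sketch of how a driver might populate it (the callback names and the -ENOSYS placeholder body are hypothetical):

static int example_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
				 struct msi_desc *desc)
{
	/* allocate an irq, compose the MSI message, bind it to desc ... */
	return -ENOSYS;	/* placeholder */
}

static struct msi_chip example_msi_chip = {
	.setup_irq	= example_msi_setup_irq,
	/* .teardown_irq and .check_device filled in as needed */
};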
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 211ff67e8b0d..36bb6a503f19 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -91,10 +91,6 @@ struct nand_bbt_descr {
* with NAND_BBT_CREATE.
*/
#define NAND_BBT_CREATE_EMPTY 0x00000400
-/* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES 0x00000800
-/* Scan block empty during good / bad block scan */
-#define NAND_BBT_SCANEMPTY 0x00001000
/* Write bbt if necessary */
#define NAND_BBT_WRITE 0x00002000
/* Read and write back block contents when writing bbt */
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
index d6ed61ef451d..c8be32e9fc49 100644
--- a/include/linux/mtd/fsmc.h
+++ b/include/linux/mtd/fsmc.h
@@ -137,6 +137,7 @@ enum access_mode {
/**
* fsmc_nand_platform_data - platform specific NAND controller config
+ * @nand_timings: timing setup for the physical NAND interface
* @partitions: partition table for the platform, use a default fallback
* if this is NULL
* @nr_partitions: the number of partitions in the previous entry
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 4b02512e421c..5f487d776411 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -365,7 +365,7 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
bitpos = (map_bankwidth(map)-1-i)*8;
#endif
orig.x[0] &= ~(0xff << bitpos);
- orig.x[0] |= buf[i-start] << bitpos;
+ orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
}
}
return orig;
@@ -384,7 +384,7 @@ static inline map_word map_word_ff(struct map_info *map)
if (map_bankwidth(map) < MAP_FF_LIMIT) {
int bw = 8 * map_bankwidth(map);
- r.x[0] = (1 << bw) - 1;
+ r.x[0] = (1UL << bw) - 1;
} else {
for (i=0; i<map_words(map); i++)
r.x[i] = ~0UL;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index a5cf4e8d6818..8cc0e2fb6894 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -29,9 +29,6 @@
#include <asm/div64.h>
-#define MTD_CHAR_MAJOR 90
-#define MTD_BLOCK_MAJOR 31
-
#define MTD_ERASE_PENDING 0x01
#define MTD_ERASING 0x02
#define MTD_ERASE_SUSPEND 0x04
@@ -173,6 +170,9 @@ struct mtd_info {
/* ECC layout structure pointer - read only! */
struct nand_ecclayout *ecclayout;
+ /* the ecc step size. */
+ unsigned int ecc_step_size;
+
/* max number of correctible bit errors per ecc step */
unsigned int ecc_strength;
@@ -351,6 +351,11 @@ static inline int mtd_has_oob(const struct mtd_info *mtd)
return mtd->_read_oob && mtd->_write_oob;
}
+static inline int mtd_type_is_nand(const struct mtd_info *mtd)
+{
+ return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
+}
+
static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
return !!mtd->_block_isbad;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index ab6363443ce8..9e6c8f9f306e 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -56,7 +56,7 @@ extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
* is supported now. If you add a chip with bigger oobsize/page
* adjust this accordingly.
*/
-#define NAND_MAX_OOBSIZE 640
+#define NAND_MAX_OOBSIZE 744
#define NAND_MAX_PAGESIZE 8192
/*
@@ -198,10 +198,15 @@ typedef enum {
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
+#define NAND_CI_CELLTYPE_SHIFT 2
/* Keep gcc happy */
struct nand_chip;
+/* ONFI features */
+#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
+#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
+
/* ONFI timing mode, used in both asynchronous and synchronous mode */
#define ONFI_TIMING_MODE_0 (1 << 0)
#define ONFI_TIMING_MODE_1 (1 << 1)
@@ -217,6 +222,9 @@ struct nand_chip;
/* ONFI subfeature parameters length */
#define ONFI_SUBFEATURE_PARAM_LEN 4
+/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
+
struct nand_onfi_params {
/* rev info and features block */
/* 'O' 'N' 'F' 'I' */
@@ -224,7 +232,10 @@ struct nand_onfi_params {
__le16 revision;
__le16 features;
__le16 opt_cmd;
- u8 reserved[22];
+ u8 reserved0[2];
+ __le16 ext_param_page_length; /* since ONFI 2.1 */
+ u8 num_of_param_pages; /* since ONFI 2.1 */
+ u8 reserved1[17];
/* manufacturer information block */
char manufacturer[12];
@@ -281,6 +292,40 @@ struct nand_onfi_params {
#define ONFI_CRC_BASE 0x4F4E
+/* Extended ECC information Block Definition (since ONFI 2.1) */
+struct onfi_ext_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+#define ONFI_SECTION_TYPE_0 0 /* Unused section. */
+#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */
+#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */
+struct onfi_ext_section {
+ u8 type;
+ u8 length;
+} __packed;
+
+#define ONFI_EXT_SECTION_MAX 8
+
+/* Extended Parameter Page Definition (since ONFI 2.1) */
+struct onfi_ext_param_page {
+ __le16 crc;
+ u8 sig[4]; /* 'E' 'P' 'P' 'S' */
+ u8 reserved0[10];
+ struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
+
+ /*
+ * The actual size of the Extended Parameter Page is in
+ * @ext_param_page_length of nand_onfi_params{}.
+ * The following are the variable length sections.
+ * So we do not add any fields below. Please see the ONFI spec.
+ */
+} __packed;
+
/**
* struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
* @lock: protection lock
@@ -390,8 +435,8 @@ struct nand_buffers {
* @write_buf: [REPLACEABLE] write data from the buffer to the chip
* @read_buf: [REPLACEABLE] read data from the chip into the buffer
* @select_chip: [REPLACEABLE] select chip nr
- * @block_bad: [REPLACEABLE] check, if the block is bad
- * @block_markbad: [REPLACEABLE] mark the block bad
+ * @block_bad: [REPLACEABLE] check if a block is bad, using OOB markers
+ * @block_markbad: [REPLACEABLE] mark a block bad
* @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling
* ALE/CLE/nCE. Also used to write command and address
* @init_size: [BOARDSPECIFIC] hardwarespecific function for setting
@@ -433,7 +478,13 @@ struct nand_buffers {
* @badblockbits: [INTERN] minimum number of set bits in a good block's
* bad block marker position; i.e., BBM == 11110111b is
* not bad when badblockbits == 7
- * @cellinfo: [INTERN] MLC/multichip data from chip ident
+ * @bits_per_cell: [INTERN] number of bits per cell. i.e., 1 means SLC.
+ * @ecc_strength_ds: [INTERN] ECC correctability from the datasheet.
+ * Minimum amount of bit errors per @ecc_step_ds guaranteed
+ * to be correctable. If unknown, set to zero.
+ * @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds,
+ * also from the datasheet. It is the recommended ECC step
+ * size, if known; if unknown, set to zero.
* @numchips: [INTERN] number of physical chips
* @chipsize: [INTERN] the size of one chip for multichip arrays
* @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
@@ -448,7 +499,6 @@ struct nand_buffers {
* supported, 0 otherwise.
* @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
* @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
- * @ecclayout: [REPLACEABLE] the default ECC placement scheme
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -509,7 +559,9 @@ struct nand_chip {
int pagebuf;
unsigned int pagebuf_bitflips;
int subpagesize;
- uint8_t cellinfo;
+ uint8_t bits_per_cell;
+ uint16_t ecc_strength_ds;
+ uint16_t ecc_step_ds;
int badblockpos;
int badblockbits;
@@ -520,7 +572,6 @@ struct nand_chip {
uint8_t *oob_poi;
struct nand_hw_control *controller;
- struct nand_ecclayout *ecclayout;
struct nand_ecc_ctrl ecc;
struct nand_buffers *buffers;
@@ -576,6 +627,11 @@ struct nand_chip {
{ .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
.options = (opts) }
+#define NAND_ECC_INFO(_strength, _step) \
+ { .strength_ds = (_strength), .step_ds = (_step) }
+#define NAND_ECC_STRENGTH(type) ((type)->ecc.strength_ds)
+#define NAND_ECC_STEP(type) ((type)->ecc.step_ds)
+
/**
* struct nand_flash_dev - NAND Flash Device ID Structure
* @name: a human-readable name of the NAND chip
@@ -593,6 +649,12 @@ struct nand_chip {
* @options: stores various chip bit options
* @id_len: The valid length of the @id.
* @oobsize: OOB size
+ * @ecc.strength_ds: The ECC correctability from the datasheet, same as the
+ * @ecc_strength_ds in nand_chip{}.
+ * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the
+ * @ecc_step_ds in nand_chip{}, also from the datasheet.
+ * For example, the "4bit ECC for each 512Byte" can be set with
+ * NAND_ECC_INFO(4, 512).
*/
struct nand_flash_dev {
char *name;
@@ -609,6 +671,10 @@ struct nand_flash_dev {
unsigned int options;
uint16_t id_len;
uint16_t oobsize;
+ struct {
+ uint16_t strength_ds;
+ uint16_t step_ds;
+ } ecc;
};
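A hypothetical full-ID table entry showing the new per-device ECC fields; the name is illustrative and only the NAND_ECC_INFO() usage mirrors the documentation above:

static struct nand_flash_dev demo_ids[] = {
	{ .name = "demo 4Gb NAND",
	  .ecc  = NAND_ECC_INFO(4, 512), },	/* 4-bit ECC per 512 bytes */
	{ /* sentinel */ },
};

With such an entry, NAND_ECC_STRENGTH(&demo_ids[0]) evaluates to 4 and NAND_ECC_STEP(&demo_ids[0]) to 512.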
/**
@@ -625,8 +691,8 @@ extern struct nand_flash_dev nand_flash_ids[];
extern struct nand_manufacturers nand_manuf_ids[];
extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
-extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_default_bbt(struct mtd_info *mtd);
+extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
int allowbbt);
@@ -708,6 +774,12 @@ struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
return chip->priv;
}
+/* return the supported features. */
+static inline int onfi_feature(struct nand_chip *chip)
+{
+ return chip->onfi_version ? le16_to_cpu(chip->onfi_params.features) : 0;
+}
+
/* return the supported asynchronous timing mode. */
static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
{
@@ -724,4 +796,13 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
}
+/*
+ * Check whether this is an SLC NAND chip.
+ * Use !nand_is_slc() to check for MLC/TLC NAND chips.
+ * We do not distinguish between MLC and TLC for now.
+ */
+static inline bool nand_is_slc(struct nand_chip *chip)
+{
+ return chip->bits_per_cell == 1;
+}
#endif /* __LINUX_MTD_NAND_H */
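A small usage sketch for the new helpers at the end of the header (assumed driver code, not from the patch):

static void demo_report_chip(struct nand_chip *chip)
{
	if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
		pr_info("chip uses a 16-bit bus\n");
	pr_info("chip is %s\n", nand_is_slc(chip) ? "SLC" : "MLC/TLC");
}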
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ccd4260834c5..d3181936c138 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -15,8 +15,8 @@
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
-
#include <linux/atomic.h>
+#include <asm/processor.h>
/*
* Simple, straightforward mutexes with strict semantics:
@@ -131,7 +131,7 @@ static inline int mutex_is_locked(struct mutex *lock)
}
/*
- * See kernel/mutex.c for detailed documentation of these APIs.
+ * See kernel/locking/mutex.c for detailed documentation of these APIs.
* Also see Documentation/mutex-design.txt.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -175,8 +175,8 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
-#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
-#define arch_mutex_cpu_relax() cpu_relax()
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
#endif
#endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
index cd09751c71a0..492de72560fa 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -16,7 +16,7 @@ struct nameidata {
struct path root;
struct inode *inode; /* path.dentry.d_inode */
unsigned int flags;
- unsigned seq;
+ unsigned seq, m_seq;
int last_type;
unsigned depth;
char *saved_names[MAX_NESTED_LINKS + 1];
@@ -58,7 +58,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
extern int user_path_at(int, const char __user *, unsigned, struct path *);
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
-extern int user_path_umountat(int, const char __user *, unsigned int, struct path *);
#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
@@ -71,8 +70,7 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigne
extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
extern void done_path_create(struct path *, struct dentry *);
extern struct dentry *kern_path_locked(const char *, struct path *);
-extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
- const char *, unsigned int, struct path *);
+extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
diff --git a/include/linux/net.h b/include/linux/net.h
index 4f27575ce1d6..69be3e6079c8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -24,6 +24,7 @@
#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
+#include <linux/jump_label.h>
#include <uapi/linux/net.h>
struct poll_table_struct;
@@ -163,6 +164,14 @@ struct proto_ops {
#endif
int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len);
+ /* Notes for implementing recvmsg:
+ * ===============================
+ * msg->msg_namelen should get updated by the recvmsg handlers
+ * iff msg_name != NULL. It is by default 0 to prevent
+ * returning uninitialized memory to user space. The recvfrom
+ * handlers can assume that msg.msg_name is either NULL or has
+ * a minimum size of sizeof(struct sockaddr_storage).
+ */
int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len,
int flags);
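A hypothetical recvmsg handler honoring the note above: msg_namelen is only written when a name buffer was supplied, since it already defaults to 0.

static int demo_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t total_len, int flags)
{
	int copied = 0;		/* ...copy the payload into msg here... */

	if (msg->msg_name) {
		memset(msg->msg_name, 0, sizeof(struct sockaddr_storage));
		/* ...fill in the peer address... */
		msg->msg_namelen = sizeof(struct sockaddr_storage);
	}
	return copied;
}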
@@ -172,7 +181,7 @@ struct proto_ops {
int offset, size_t size, int flags);
ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
- void (*set_peek_off)(struct sock *sk, int val);
+ int (*set_peek_off)(struct sock *sk, int val);
};
#define DECLARE_SOCKADDR(type, dst, src) \
@@ -195,27 +204,23 @@ enum {
SOCK_WAKE_URG,
};
-extern int sock_wake_async(struct socket *sk, int how, int band);
-extern int sock_register(const struct net_proto_family *fam);
-extern void sock_unregister(int family);
-extern int __sock_create(struct net *net, int family, int type, int proto,
- struct socket **res, int kern);
-extern int sock_create(int family, int type, int proto,
- struct socket **res);
-extern int sock_create_kern(int family, int type, int proto,
- struct socket **res);
-extern int sock_create_lite(int family, int type, int proto,
- struct socket **res);
-extern void sock_release(struct socket *sock);
-extern int sock_sendmsg(struct socket *sock, struct msghdr *msg,
- size_t len);
-extern int sock_recvmsg(struct socket *sock, struct msghdr *msg,
- size_t size, int flags);
-extern struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
-extern struct socket *sockfd_lookup(int fd, int *err);
-extern struct socket *sock_from_file(struct file *file, int *err);
+int sock_wake_async(struct socket *sk, int how, int band);
+int sock_register(const struct net_proto_family *fam);
+void sock_unregister(int family);
+int __sock_create(struct net *net, int family, int type, int proto,
+ struct socket **res, int kern);
+int sock_create(int family, int type, int proto, struct socket **res);
+int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_lite(int family, int type, int proto, struct socket **res);
+void sock_release(struct socket *sock);
+int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
+struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
+struct socket *sockfd_lookup(int fd, int *err);
+struct socket *sock_from_file(struct file *file, int *err);
#define sockfd_put(sock) fput(sock->file)
-extern int net_ratelimit(void);
+int net_ratelimit(void);
#define net_ratelimited_function(function, ...) \
do { \
@@ -243,32 +248,52 @@ do { \
#define net_random() prandom_u32()
#define net_srandom(seed) prandom_seed((__force u32)(seed))
-extern int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num, size_t len);
-extern int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
- struct kvec *vec, size_t num,
- size_t len, int flags);
-
-extern int kernel_bind(struct socket *sock, struct sockaddr *addr,
- int addrlen);
-extern int kernel_listen(struct socket *sock, int backlog);
-extern int kernel_accept(struct socket *sock, struct socket **newsock,
- int flags);
-extern int kernel_connect(struct socket *sock, struct sockaddr *addr,
- int addrlen, int flags);
-extern int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
- int *addrlen);
-extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
- int *addrlen);
-extern int kernel_getsockopt(struct socket *sock, int level, int optname,
- char *optval, int *optlen);
-extern int kernel_setsockopt(struct socket *sock, int level, int optname,
- char *optval, unsigned int optlen);
-extern int kernel_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
-extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
-extern int kernel_sock_shutdown(struct socket *sock,
- enum sock_shutdown_cmd how);
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+ struct static_key *done_key);
+
+#ifdef HAVE_JUMP_LABEL
+#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
+ { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
+#else /* !HAVE_JUMP_LABEL */
+#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#endif /* HAVE_JUMP_LABEL */
+
+#define net_get_random_once(buf, nbytes) \
+ ({ \
+ bool ___ret = false; \
+ static bool ___done = false; \
+ static struct static_key ___done_key = \
+ ___NET_RANDOM_STATIC_KEY_INIT; \
+ if (!static_key_true(&___done_key)) \
+ ___ret = __net_get_random_once(buf, \
+ nbytes, \
+ &___done, \
+ &___done_key); \
+ ___ret; \
+ })
+
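Typical use of the new helper (the demo_* names are illustrative): initialize a per-boot secret exactly once, with later calls short-circuited by the static key.

static u32 demo_hash_secret __read_mostly;

static void demo_init_secret(void)
{
	net_get_random_once(&demo_hash_secret, sizeof(demo_hash_secret));
}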
+int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+ size_t num, size_t len);
+int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+ size_t num, size_t len, int flags);
+
+int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen);
+int kernel_listen(struct socket *sock, int backlog);
+int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
+int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+ int flags);
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
+ int *addrlen);
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
+ int *addrlen);
+int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
+ int *optlen);
+int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
+ unsigned int optlen);
+int kernel_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags);
+int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
#define MODULE_ALIAS_NETPROTO(proto) \
MODULE_ALIAS("net-pf-" __stringify(proto))
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index a2a89a5c7be5..1005ebf17575 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -42,6 +42,8 @@ enum {
NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */
NETIF_F_FSO_BIT, /* ... FCoE segmentation */
NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */
+ NETIF_F_GSO_IPIP_BIT, /* ... IPIP tunnel with TSO */
+ NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */
NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
@@ -60,6 +62,7 @@ enum {
NETIF_F_HW_VLAN_STAG_TX_BIT, /* Transmit VLAN STAG HW acceleration */
NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */
NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
+ NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
/*
* Add your fresh new feature above and remember to update
@@ -107,11 +110,14 @@ enum {
#define NETIF_F_RXFCS __NETIF_F(RXFCS)
#define NETIF_F_RXALL __NETIF_F(RXALL)
#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE)
+#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP)
+#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
+#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9ff50c9a0009..d9a550bf3e8e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -60,8 +60,8 @@ struct wireless_dev;
#define SET_ETHTOOL_OPS(netdev,ops) \
( (netdev)->ethtool_ops = (ops) )
-extern void netdev_set_default_ethtool_ops(struct net_device *dev,
- const struct ethtool_ops *ops);
+void netdev_set_default_ethtool_ops(struct net_device *dev,
+ const struct ethtool_ops *ops);
/* hardware address assignment types */
#define NET_ADDR_PERM 0 /* address is permanent (default) */
@@ -298,7 +298,7 @@ struct netdev_boot_setup {
};
#define NETDEV_BOOT_SETUP_MAX 8
-extern int __init netdev_boot_setup(char *str);
+int __init netdev_boot_setup(char *str);
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
@@ -394,7 +394,7 @@ enum rx_handler_result {
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
-extern void __napi_schedule(struct napi_struct *n);
+void __napi_schedule(struct napi_struct *n);
static inline bool napi_disable_pending(struct napi_struct *n)
{
@@ -445,8 +445,8 @@ static inline bool napi_reschedule(struct napi_struct *napi)
*
* Mark NAPI processing as complete.
*/
-extern void __napi_complete(struct napi_struct *n);
-extern void napi_complete(struct napi_struct *n);
+void __napi_complete(struct napi_struct *n);
+void napi_complete(struct napi_struct *n);
/**
* napi_by_id - lookup a NAPI by napi_id
@@ -455,7 +455,7 @@ extern void napi_complete(struct napi_struct *n);
* lookup @napi_id in napi_hash table
* must be called under rcu_read_lock()
*/
-extern struct napi_struct *napi_by_id(unsigned int napi_id);
+struct napi_struct *napi_by_id(unsigned int napi_id);
/**
* napi_hash_add - add a NAPI to global hashtable
@@ -463,7 +463,7 @@ extern struct napi_struct *napi_by_id(unsigned int napi_id);
*
* generate a new napi_id and store a @napi under it in napi_hash
*/
-extern void napi_hash_add(struct napi_struct *napi);
+void napi_hash_add(struct napi_struct *napi);
/**
* napi_hash_del - remove a NAPI from global table
@@ -472,7 +472,7 @@ extern void napi_hash_add(struct napi_struct *napi);
* Warning: caller must observe rcu grace period
* before freeing memory containing @napi
*/
-extern void napi_hash_del(struct napi_struct *napi);
+void napi_hash_del(struct napi_struct *napi);
/**
* napi_disable - prevent NAPI from scheduling
@@ -483,6 +483,7 @@ extern void napi_hash_del(struct napi_struct *napi);
*/
static inline void napi_disable(struct napi_struct *n)
{
+ might_sleep();
set_bit(NAPI_STATE_DISABLE, &n->state);
while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
@@ -664,8 +665,8 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
#ifdef CONFIG_RFS_ACCEL
-extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
- u32 flow_id, u16 filter_id);
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
+ u16 filter_id);
#endif
/* This structure contains an instance of an RX queue. */
@@ -950,17 +951,36 @@ struct netdev_phys_port_id {
* multiple net devices on single physical port.
*
* void (*ndo_add_vxlan_port)(struct net_device *dev,
- * sa_family_t sa_family, __u16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by vxlan to notify a driver about the UDP port and socket
* address family that vxlan is listening to. It is called only when
* a new port starts listening. The operation is protected by the
* vxlan_net->sock_lock.
*
* void (*ndo_del_vxlan_port)(struct net_device *dev,
- * sa_family_t sa_family, __u16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by vxlan to notify the driver about a UDP port and socket
* address family that vxlan is not listening to anymore. The operation
* is protected by the vxlan_net->sock_lock.
+ *
+ * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
+ * struct net_device *dev)
+ * Called by upper layer devices to accelerate switching or other
+ * station functionality into hardware. 'pdev' is the lowerdev
+ * to use for the offload and 'dev' is the net device that will
+ * back the offload. Returns a pointer to the private structure
+ * the upper layer will maintain.
+ * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
+ * Called by upper layer device to delete the station created
+ * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
+ * the station and priv is the structure returned by the add
+ * operation.
+ * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
+ * struct net_device *dev,
+ * void *priv);
+ * Callback to use for xmit over the accelerated station. This
+ * is used in place of ndo_start_xmit on accelerated net
+ * devices.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1093,10 +1113,19 @@ struct net_device_ops {
struct netdev_phys_port_id *ppid);
void (*ndo_add_vxlan_port)(struct net_device *dev,
sa_family_t sa_family,
- __u16 port);
+ __be16 port);
void (*ndo_del_vxlan_port)(struct net_device *dev,
sa_family_t sa_family,
- __u16 port);
+ __be16 port);
+
+ void* (*ndo_dfwd_add_station)(struct net_device *pdev,
+ struct net_device *dev);
+ void (*ndo_dfwd_del_station)(struct net_device *pdev,
+ void *priv);
+
+ netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
+ struct net_device *dev,
+ void *priv);
};
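A minimal sketch of a lower driver providing the new L2 forwarding offload hooks; every demo_* name is an assumption for illustration, and a real implementation would hand skbs to a per-station hardware queue instead of dropping them.

struct demo_fwd_priv {
	struct net_device *vdev;	/* upper device backing the station */
};

static void *demo_dfwd_add_station(struct net_device *pdev,
				   struct net_device *vdev)
{
	struct demo_fwd_priv *fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);

	if (fwd)
		fwd->vdev = vdev;
	return fwd;			/* handed back in del/start_xmit */
}

static void demo_dfwd_del_station(struct net_device *pdev, void *priv)
{
	kfree(priv);
}

static netdev_tx_t demo_dfwd_start_xmit(struct sk_buff *skb,
					struct net_device *dev, void *priv)
{
	dev_kfree_skb_any(skb);		/* sketch only: drop instead of queueing */
	return NETDEV_TX_OK;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_dfwd_add_station	= demo_dfwd_add_station,
	.ndo_dfwd_del_station	= demo_dfwd_del_station,
	.ndo_dfwd_start_xmit	= demo_dfwd_start_xmit,
};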
/*
@@ -1131,7 +1160,7 @@ struct net_device {
unsigned long mem_end; /* shared mem end */
unsigned long mem_start; /* shared mem start */
unsigned long base_addr; /* device I/O address */
- unsigned int irq; /* device IRQ number */
+ int irq; /* device IRQ number */
/*
* Some hardware also needs these fields, but they are not
@@ -1143,8 +1172,19 @@ struct net_device {
struct list_head dev_list;
struct list_head napi_list;
struct list_head unreg_list;
- struct list_head upper_dev_list; /* List of upper devices */
- struct list_head lower_dev_list;
+ struct list_head close_list;
+
+ /* directly linked devices, like slaves for bonding */
+ struct {
+ struct list_head upper;
+ struct list_head lower;
+ } adj_list;
+
+ /* all linked devices, *including* neighbours */
+ struct {
+ struct list_head upper;
+ struct list_head lower;
+ } all_adj_list;
/* currently active device features */
@@ -1183,6 +1223,7 @@ struct net_device {
/* Management operations */
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
+ const struct forwarding_accel_ops *fwd_ops;
/* Hardware header description */
const struct header_ops *header_ops;
@@ -1214,7 +1255,7 @@ struct net_device {
unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
unsigned char addr_assign_type; /* hw address assignment type */
unsigned char addr_len; /* hardware address length */
- unsigned char neigh_priv_len;
+ unsigned short neigh_priv_len;
unsigned short dev_id; /* Used to differentiate devices
* that share the same link
* layer address
@@ -1487,9 +1528,9 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
f(dev, &dev->_tx[i], arg);
}
-extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb);
-extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+ struct sk_buff *skb);
+u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
/*
* Net namespace inlines
@@ -1673,8 +1714,8 @@ struct packet_offload {
#define NETDEV_CHANGEUPPER 0x0015
#define NETDEV_RESEND_IGMP 0x0016
-extern int register_netdevice_notifier(struct notifier_block *nb);
-extern int unregister_netdevice_notifier(struct notifier_block *nb);
+int register_netdevice_notifier(struct notifier_block *nb);
+int unregister_netdevice_notifier(struct notifier_block *nb);
struct netdev_notifier_info {
struct net_device *dev;
@@ -1697,9 +1738,9 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
return info->dev;
}
-extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
- struct netdev_notifier_info *info);
-extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
+ struct netdev_notifier_info *info);
+int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern rwlock_t dev_base_lock; /* Device list lock */
@@ -1754,54 +1795,53 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
-extern int netdev_boot_setup_check(struct net_device *dev);
-extern unsigned long netdev_boot_base(const char *prefix, int unit);
-extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
- const char *hwaddr);
-extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern void dev_add_pack(struct packet_type *pt);
-extern void dev_remove_pack(struct packet_type *pt);
-extern void __dev_remove_pack(struct packet_type *pt);
-extern void dev_add_offload(struct packet_offload *po);
-extern void dev_remove_offload(struct packet_offload *po);
-extern void __dev_remove_offload(struct packet_offload *po);
-
-extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
- unsigned short mask);
-extern struct net_device *dev_get_by_name(struct net *net, const char *name);
-extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
-extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
-extern int dev_alloc_name(struct net_device *dev, const char *name);
-extern int dev_open(struct net_device *dev);
-extern int dev_close(struct net_device *dev);
-extern void dev_disable_lro(struct net_device *dev);
-extern int dev_loopback_xmit(struct sk_buff *newskb);
-extern int dev_queue_xmit(struct sk_buff *skb);
-extern int register_netdevice(struct net_device *dev);
-extern void unregister_netdevice_queue(struct net_device *dev,
- struct list_head *head);
-extern void unregister_netdevice_many(struct list_head *head);
+int netdev_boot_setup_check(struct net_device *dev);
+unsigned long netdev_boot_base(const char *prefix, int unit);
+struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+ const char *hwaddr);
+struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
+struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
+void dev_add_pack(struct packet_type *pt);
+void dev_remove_pack(struct packet_type *pt);
+void __dev_remove_pack(struct packet_type *pt);
+void dev_add_offload(struct packet_offload *po);
+void dev_remove_offload(struct packet_offload *po);
+void __dev_remove_offload(struct packet_offload *po);
+
+struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
+ unsigned short mask);
+struct net_device *dev_get_by_name(struct net *net, const char *name);
+struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
+struct net_device *__dev_get_by_name(struct net *net, const char *name);
+int dev_alloc_name(struct net_device *dev, const char *name);
+int dev_open(struct net_device *dev);
+int dev_close(struct net_device *dev);
+void dev_disable_lro(struct net_device *dev);
+int dev_loopback_xmit(struct sk_buff *newskb);
+int dev_queue_xmit(struct sk_buff *skb);
+int register_netdevice(struct net_device *dev);
+void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
+void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
unregister_netdevice_queue(dev, NULL);
}
-extern int netdev_refcnt_read(const struct net_device *dev);
-extern void free_netdev(struct net_device *dev);
-extern void synchronize_net(void);
-extern int init_dummy_netdev(struct net_device *dev);
+int netdev_refcnt_read(const struct net_device *dev);
+void free_netdev(struct net_device *dev);
+void netdev_freemem(struct net_device *dev);
+void synchronize_net(void);
+int init_dummy_netdev(struct net_device *dev);
-extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-extern int netdev_get_name(struct net *net, char *name, int ifindex);
-extern int dev_restart(struct net_device *dev);
+struct net_device *dev_get_by_index(struct net *net, int ifindex);
+struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+int netdev_get_name(struct net *net, char *name, int ifindex);
+int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
-extern int netpoll_trap(void);
+int netpoll_trap(void);
#endif
-extern int skb_gro_receive(struct sk_buff **head,
- struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
@@ -1873,7 +1913,7 @@ static inline int dev_parse_header(const struct sk_buff *skb,
}
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
-extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
+int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
return register_gifconf(family, NULL);
@@ -1944,7 +1984,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
-extern void __netif_schedule(struct Qdisc *q);
+void __netif_schedule(struct Qdisc *q);
static inline void netif_schedule_queue(struct netdev_queue *txq)
{
@@ -2101,6 +2141,15 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
#endif
}
+/**
+ * netdev_sent_queue - report the number of bytes queued to hardware
+ * @dev: network device
+ * @bytes: number of bytes queued to the hardware device queue
+ *
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue. @bytes should be a good approximation and should
 * exactly match the @bytes later passed to netdev_completed_queue().
+ */
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
@@ -2130,6 +2179,16 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
#endif
}
+/**
+ * netdev_completed_queue - report bytes and packets completed by device
+ * @dev: network device
+ * @pkts: actual number of packets sent over the medium
+ * @bytes: actual number of bytes sent over the medium
+ *
+ * Report the number of bytes and packets transmitted by the network device
 * hardware queue over the physical medium; @bytes must exactly match the
 * @bytes amount passed to netdev_sent_queue().
+ */
static inline void netdev_completed_queue(struct net_device *dev,
unsigned int pkts, unsigned int bytes)
{
@@ -2144,6 +2203,13 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
#endif
}
+/**
+ * netdev_reset_queue - reset the packets and bytes count of a network device
+ * @dev_queue: network device
+ *
+ * Reset the bytes and packet count of a network device and clear the
+ * software flow control OFF bit for this network device
+ */
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
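An illustrative byte-queue-limit pattern using the single-queue wrappers documented above (driver names assumed): report bytes when descriptors are posted and again when completions arrive, and reset on ring teardown.

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ...post skb to the hardware ring here... */
	netdev_sent_queue(dev, len);
	return NETDEV_TX_OK;
}

static void demo_tx_complete(struct net_device *dev,
			     unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);
}

static void demo_tx_ring_teardown(struct net_device *dev)
{
	netdev_reset_queue(dev);
}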
@@ -2238,11 +2304,11 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
}
#ifdef CONFIG_XPS
-extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
- u16 index);
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+ u16 index);
#else
static inline int netif_set_xps_queue(struct net_device *dev,
- struct cpumask *mask,
+ const struct cpumask *mask,
u16 index)
{
return 0;
@@ -2270,12 +2336,10 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
return dev->num_tx_queues > 1;
}
-extern int netif_set_real_num_tx_queues(struct net_device *dev,
- unsigned int txq);
+int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
#ifdef CONFIG_RPS
-extern int netif_set_real_num_rx_queues(struct net_device *dev,
- unsigned int rxq);
+int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
unsigned int rxq)
@@ -2302,28 +2366,27 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
}
#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
-extern int netif_get_num_default_rss_queues(void);
+int netif_get_num_default_rss_queues(void);
/* Use this variant when it is known for sure that it
* is executing from hardware interrupt context or with hardware interrupts
* disabled.
*/
-extern void dev_kfree_skb_irq(struct sk_buff *skb);
+void dev_kfree_skb_irq(struct sk_buff *skb);
/* Use this variant in places where it could be invoked
* from either hardware interrupt or other context, with hardware interrupts
* either disabled or enabled.
*/
-extern void dev_kfree_skb_any(struct sk_buff *skb);
+void dev_kfree_skb_any(struct sk_buff *skb);
-extern int netif_rx(struct sk_buff *skb);
-extern int netif_rx_ni(struct sk_buff *skb);
-extern int netif_receive_skb(struct sk_buff *skb);
-extern gro_result_t napi_gro_receive(struct napi_struct *napi,
- struct sk_buff *skb);
-extern void napi_gro_flush(struct napi_struct *napi, bool flush_old);
-extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
-extern gro_result_t napi_gro_frags(struct napi_struct *napi);
+int netif_rx(struct sk_buff *skb);
+int netif_rx_ni(struct sk_buff *skb);
+int netif_receive_skb(struct sk_buff *skb);
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
+void napi_gro_flush(struct napi_struct *napi, bool flush_old);
+struct sk_buff *napi_get_frags(struct napi_struct *napi);
+gro_result_t napi_gro_frags(struct napi_struct *napi);
static inline void napi_free_frags(struct napi_struct *napi)
{
@@ -2331,40 +2394,36 @@ static inline void napi_free_frags(struct napi_struct *napi)
napi->skb = NULL;
}
-extern int netdev_rx_handler_register(struct net_device *dev,
- rx_handler_func_t *rx_handler,
- void *rx_handler_data);
-extern void netdev_rx_handler_unregister(struct net_device *dev);
-
-extern bool dev_valid_name(const char *name);
-extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern int dev_ethtool(struct net *net, struct ifreq *);
-extern unsigned int dev_get_flags(const struct net_device *);
-extern int __dev_change_flags(struct net_device *, unsigned int flags);
-extern int dev_change_flags(struct net_device *, unsigned int);
-extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
-extern int dev_change_name(struct net_device *, const char *);
-extern int dev_set_alias(struct net_device *, const char *, size_t);
-extern int dev_change_net_namespace(struct net_device *,
- struct net *, const char *);
-extern int dev_set_mtu(struct net_device *, int);
-extern void dev_set_group(struct net_device *, int);
-extern int dev_set_mac_address(struct net_device *,
- struct sockaddr *);
-extern int dev_change_carrier(struct net_device *,
- bool new_carrier);
-extern int dev_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_port_id *ppid);
-extern int dev_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev,
- struct netdev_queue *txq);
-extern int dev_forward_skb(struct net_device *dev,
- struct sk_buff *skb);
+int netdev_rx_handler_register(struct net_device *dev,
+ rx_handler_func_t *rx_handler,
+ void *rx_handler_data);
+void netdev_rx_handler_unregister(struct net_device *dev);
+
+bool dev_valid_name(const char *name);
+int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+int dev_ethtool(struct net *net, struct ifreq *);
+unsigned int dev_get_flags(const struct net_device *);
+int __dev_change_flags(struct net_device *, unsigned int flags);
+int dev_change_flags(struct net_device *, unsigned int);
+void __dev_notify_flags(struct net_device *, unsigned int old_flags,
+ unsigned int gchanges);
+int dev_change_name(struct net_device *, const char *);
+int dev_set_alias(struct net_device *, const char *, size_t);
+int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+int dev_set_mtu(struct net_device *, int);
+void dev_set_group(struct net_device *, int);
+int dev_set_mac_address(struct net_device *, struct sockaddr *);
+int dev_change_carrier(struct net_device *, bool new_carrier);
+int dev_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_port_id *ppid);
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq, void *accel_priv);
+int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
extern int netdev_budget;
/* Called by rtnetlink.c:rtnl_unlock() */
-extern void netdev_run_todo(void);
+void netdev_run_todo(void);
/**
* dev_put - release reference to device
@@ -2397,9 +2456,9 @@ static inline void dev_hold(struct net_device *dev)
* kind of lower layer not just hardware media.
*/
-extern void linkwatch_init_dev(struct net_device *dev);
-extern void linkwatch_fire_event(struct net_device *dev);
-extern void linkwatch_forget_dev(struct net_device *dev);
+void linkwatch_init_dev(struct net_device *dev);
+void linkwatch_fire_event(struct net_device *dev);
+void linkwatch_forget_dev(struct net_device *dev);
/**
* netif_carrier_ok - test if carrier present
@@ -2412,13 +2471,13 @@ static inline bool netif_carrier_ok(const struct net_device *dev)
return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
-extern unsigned long dev_trans_start(struct net_device *dev);
+unsigned long dev_trans_start(struct net_device *dev);
-extern void __netdev_watchdog_up(struct net_device *dev);
+void __netdev_watchdog_up(struct net_device *dev);
-extern void netif_carrier_on(struct net_device *dev);
+void netif_carrier_on(struct net_device *dev);
-extern void netif_carrier_off(struct net_device *dev);
+void netif_carrier_off(struct net_device *dev);
/**
* netif_dormant_on - mark device as dormant.
@@ -2486,9 +2545,9 @@ static inline bool netif_device_present(struct net_device *dev)
return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
-extern void netif_device_detach(struct net_device *dev);
+void netif_device_detach(struct net_device *dev);
-extern void netif_device_attach(struct net_device *dev);
+void netif_device_attach(struct net_device *dev);
/*
* Network interface message level settings
@@ -2697,119 +2756,138 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
-extern void ether_setup(struct net_device *dev);
+void ether_setup(struct net_device *dev);
/* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
- void (*setup)(struct net_device *),
- unsigned int txqs, unsigned int rxqs);
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ void (*setup)(struct net_device *),
+ unsigned int txqs, unsigned int rxqs);
#define alloc_netdev(sizeof_priv, name, setup) \
alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
-extern int register_netdev(struct net_device *dev);
-extern void unregister_netdev(struct net_device *dev);
+int register_netdev(struct net_device *dev);
+void unregister_netdev(struct net_device *dev);
/* General hardware address lists handling functions */
-extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type);
-extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type);
-extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len);
-extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len);
-extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
-extern void __hw_addr_init(struct netdev_hw_addr_list *list);
+int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len, unsigned char addr_type);
+void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len, unsigned char addr_type);
+int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_flush(struct netdev_hw_addr_list *list);
+void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
-extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
- unsigned char addr_type);
-extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
- unsigned char addr_type);
-extern int dev_addr_add_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type);
-extern int dev_addr_del_multiple(struct net_device *to_dev,
- struct net_device *from_dev,
- unsigned char addr_type);
-extern void dev_addr_flush(struct net_device *dev);
-extern int dev_addr_init(struct net_device *dev);
+int dev_addr_add(struct net_device *dev, const unsigned char *addr,
+ unsigned char addr_type);
+int dev_addr_del(struct net_device *dev, const unsigned char *addr,
+ unsigned char addr_type);
+int dev_addr_add_multiple(struct net_device *to_dev,
+ struct net_device *from_dev, unsigned char addr_type);
+int dev_addr_del_multiple(struct net_device *to_dev,
+ struct net_device *from_dev, unsigned char addr_type);
+void dev_addr_flush(struct net_device *dev);
+int dev_addr_init(struct net_device *dev);
/* Functions used for unicast addresses handling */
-extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_sync(struct net_device *to, struct net_device *from);
-extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_uc_flush(struct net_device *dev);
-extern void dev_uc_init(struct net_device *dev);
+int dev_uc_add(struct net_device *dev, const unsigned char *addr);
+int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_uc_del(struct net_device *dev, const unsigned char *addr);
+int dev_uc_sync(struct net_device *to, struct net_device *from);
+int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_uc_unsync(struct net_device *to, struct net_device *from);
+void dev_uc_flush(struct net_device *dev);
+void dev_uc_init(struct net_device *dev);
/* Functions used for multicast addresses handling */
-extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_sync(struct net_device *to, struct net_device *from);
-extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_mc_flush(struct net_device *dev);
-extern void dev_mc_init(struct net_device *dev);
+int dev_mc_add(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_sync(struct net_device *to, struct net_device *from);
+int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_mc_unsync(struct net_device *to, struct net_device *from);
+void dev_mc_flush(struct net_device *dev);
+void dev_mc_init(struct net_device *dev);
/* Functions used for secondary unicast and multicast support */
-extern void dev_set_rx_mode(struct net_device *dev);
-extern void __dev_set_rx_mode(struct net_device *dev);
-extern int dev_set_promiscuity(struct net_device *dev, int inc);
-extern int dev_set_allmulti(struct net_device *dev, int inc);
-extern void netdev_state_change(struct net_device *dev);
-extern void netdev_notify_peers(struct net_device *dev);
-extern void netdev_features_change(struct net_device *dev);
+void dev_set_rx_mode(struct net_device *dev);
+void __dev_set_rx_mode(struct net_device *dev);
+int dev_set_promiscuity(struct net_device *dev, int inc);
+int dev_set_allmulti(struct net_device *dev, int inc);
+void netdev_state_change(struct net_device *dev);
+void netdev_notify_peers(struct net_device *dev);
+void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
-extern void dev_load(struct net *net, const char *name);
-extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *storage);
-extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
- const struct net_device_stats *netdev_stats);
+void dev_load(struct net *net, const char *name);
+struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *storage);
+void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+ const struct net_device_stats *netdev_stats);
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int bpf_jit_enable;
-extern bool netdev_has_upper_dev(struct net_device *dev,
- struct net_device *upper_dev);
-extern bool netdev_has_any_upper_dev(struct net_device *dev);
-extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
- struct list_head **iter);
+bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+bool netdev_has_any_upper_dev(struct net_device *dev);
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+ struct list_head **iter);
/* iterate through upper list, must be called under RCU read lock */
-#define netdev_for_each_upper_dev_rcu(dev, upper, iter) \
- for (iter = &(dev)->upper_dev_list, \
- upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
- upper; \
- upper = netdev_upper_get_next_dev_rcu(dev, &(iter)))
-
-extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
-extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
-extern int netdev_upper_dev_link(struct net_device *dev,
+#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
+ for (iter = &(dev)->all_adj_list.upper, \
+ updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
+ updev; \
+ updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
+
+void *netdev_lower_get_next_private(struct net_device *dev,
+ struct list_head **iter);
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+ struct list_head **iter);
+
+#define netdev_for_each_lower_private(dev, priv, iter) \
+ for (iter = (dev)->adj_list.lower.next, \
+ priv = netdev_lower_get_next_private(dev, &(iter)); \
+ priv; \
+ priv = netdev_lower_get_next_private(dev, &(iter)))
+
+#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
+ for (iter = &(dev)->adj_list.lower, \
+ priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
+ priv; \
+ priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
+
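A short usage sketch for the renamed adjacency lists (assumed code, not from the patch): walk every upper device, direct or not, under RCU.

static void demo_list_uppers(struct net_device *dev)
{
	struct net_device *updev;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_all_upper_dev_rcu(dev, updev, iter)
		pr_info("%s is stacked above %s\n",
			netdev_name(updev), netdev_name(dev));
	rcu_read_unlock();
}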
+void *netdev_adjacent_get_private(struct list_head *adj_list);
+struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
+struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
+int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
+int netdev_master_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev);
-extern int netdev_master_upper_dev_link(struct net_device *dev,
- struct net_device *upper_dev);
-extern void netdev_upper_dev_unlink(struct net_device *dev,
- struct net_device *upper_dev);
-extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
- netdev_features_t features, bool tx_path);
-extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
- netdev_features_t features);
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+ struct net_device *upper_dev,
+ void *private);
+void netdev_upper_dev_unlink(struct net_device *dev,
+ struct net_device *upper_dev);
+void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
+ struct net_device *lower_dev);
+void *netdev_lower_dev_get_private(struct net_device *dev,
+ struct net_device *lower_dev);
+int skb_checksum_help(struct sk_buff *skb);
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features, bool tx_path);
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
@@ -2831,30 +2909,42 @@ static inline bool can_checksum_protocol(netdev_features_t features,
}
#ifdef CONFIG_BUG
-extern void netdev_rx_csum_fault(struct net_device *dev);
+void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
-extern void net_enable_timestamp(void);
-extern void net_disable_timestamp(void);
+void net_enable_timestamp(void);
+void net_disable_timestamp(void);
#ifdef CONFIG_PROC_FS
-extern int __init dev_proc_init(void);
+int __init dev_proc_init(void);
#else
#define dev_proc_init() 0
#endif
-extern int netdev_class_create_file(struct class_attribute *class_attr);
-extern void netdev_class_remove_file(struct class_attribute *class_attr);
+int netdev_class_create_file_ns(struct class_attribute *class_attr,
+ const void *ns);
+void netdev_class_remove_file_ns(struct class_attribute *class_attr,
+ const void *ns);
+
+static inline int netdev_class_create_file(struct class_attribute *class_attr)
+{
+ return netdev_class_create_file_ns(class_attr, NULL);
+}
+
+static inline void netdev_class_remove_file(struct class_attribute *class_attr)
+{
+ netdev_class_remove_file_ns(class_attr, NULL);
+}
extern struct kobj_ns_type_operations net_ns_type_operations;
-extern const char *netdev_drivername(const struct net_device *dev);
+const char *netdev_drivername(const struct net_device *dev);
-extern void linkwatch_run_queue(void);
+void linkwatch_run_queue(void);
static inline netdev_features_t netdev_get_wanted_features(
struct net_device *dev)
@@ -2918,6 +3008,11 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
+static inline bool netif_is_macvlan(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_MACVLAN;
+}
+
static inline bool netif_is_bond_master(struct net_device *dev)
{
return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
@@ -2946,22 +3041,22 @@ static inline const char *netdev_name(const struct net_device *dev)
return dev->name;
}
-extern __printf(3, 4)
+__printf(3, 4)
int netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_emerg(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_alert(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_crit(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_err(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_warn(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_notice(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
int netdev_info(const struct net_device *dev, const char *format, ...);
#define MODULE_ALIAS_NETDEV(device) \
@@ -3002,7 +3097,7 @@ do { \
* file/line information and a backtrace.
*/
#define netdev_WARN(dev, format, args...) \
- WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
+ WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
/* netif printk helpers, similar to netdev_printk */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 708fe72ab913..2077489f9887 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -35,14 +35,15 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
result->all[3] = a1->all[3] & mask->all[3];
}
-extern int netfilter_init(void);
+int netfilter_init(void);
/* Largest hook number + 1 */
#define NF_MAX_HOOKS 8
struct sk_buff;
-typedef unsigned int nf_hookfn(unsigned int hooknum,
+struct nf_hook_ops;
+typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -52,12 +53,13 @@ struct nf_hook_ops {
struct list_head list;
/* User fills in from here down. */
- nf_hookfn *hook;
- struct module *owner;
- u_int8_t pf;
- unsigned int hooknum;
+ nf_hookfn *hook;
+ struct module *owner;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
- int priority;
+ int priority;
};
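A sketch of a hook written against the new prototype (the demo_* names and state structure are made up): per-hook data now arrives through ops->priv rather than through globals keyed by hooknum.

struct demo_hook_state {
	bool drop_everything;
};

static unsigned int demo_hook(const struct nf_hook_ops *ops,
			      struct sk_buff *skb,
			      const struct net_device *in,
			      const struct net_device *out,
			      int (*okfn)(struct sk_buff *))
{
	struct demo_hook_state *state = ops->priv;

	return state && state->drop_everything ? NF_DROP : NF_ACCEPT;
}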
struct nf_sockopt_ops {
@@ -208,7 +210,7 @@ int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
/* Call this before modifying an existing packet: ensures it is
modifiable and linear to the point you care about (writable_len).
Returns true or false. */
-extern int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
+int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
struct flowi;
struct nf_queue_entry;
@@ -269,8 +271,8 @@ nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
return csum;
}
-extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
-extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+int nf_register_afinfo(const struct nf_afinfo *afinfo);
+void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
#include <net/flow.h>
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
@@ -315,7 +317,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
-extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
struct nf_conn;
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index d80e2753847c..c7174b816674 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -49,31 +49,68 @@ enum ip_set_feature {
/* Set extensions */
enum ip_set_extension {
- IPSET_EXT_NONE = 0,
- IPSET_EXT_BIT_TIMEOUT = 1,
+ IPSET_EXT_BIT_TIMEOUT = 0,
IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT),
- IPSET_EXT_BIT_COUNTER = 2,
+ IPSET_EXT_BIT_COUNTER = 1,
IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
-};
-
-/* Extension offsets */
-enum ip_set_offset {
- IPSET_OFFSET_TIMEOUT = 0,
- IPSET_OFFSET_COUNTER,
- IPSET_OFFSET_MAX,
+ IPSET_EXT_BIT_COMMENT = 2,
+ IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
+ /* Mark set with an extension which needs to call destroy */
+ IPSET_EXT_BIT_DESTROY = 7,
+ IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
};
#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT)
#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER)
+#define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT)
+
+/* Extension id, in size order */
+enum ip_set_ext_id {
+ IPSET_EXT_ID_COUNTER = 0,
+ IPSET_EXT_ID_TIMEOUT,
+ IPSET_EXT_ID_COMMENT,
+ IPSET_EXT_ID_MAX,
+};
+
+/* Extension type */
+struct ip_set_ext_type {
+ /* Destroy extension private data (can be NULL) */
+ void (*destroy)(void *ext);
+ enum ip_set_extension type;
+ enum ipset_cadt_flags flag;
+ /* Size and minimal alignment */
+ u8 len;
+ u8 align;
+};
+
+extern const struct ip_set_ext_type ip_set_extensions[];
struct ip_set_ext {
- unsigned long timeout;
u64 packets;
u64 bytes;
+ u32 timeout;
+ char *comment;
+};
+
+struct ip_set_counter {
+ atomic64_t bytes;
+ atomic64_t packets;
+};
+
+struct ip_set_comment {
+ char *str;
};
struct ip_set;
+#define ext_timeout(e, s) \
+(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+#define ext_counter(e, s) \
+(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+#define ext_comment(e, s) \
+(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+
+
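With the per-set dsize/offset bookkeeping introduced further down in this patch, set types reach an element's extension slots through the macros above. A minimal sketch (the demo_ helper is assumed) of filling the counter slot of a freshly stored element; the timeout and comment slots are reached the same way via ext_timeout() and ext_comment():

static void demo_init_extensions(struct ip_set *set, void *e,
				 const struct ip_set_ext *ext)
{
	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(e, set), ext);
}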
typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
const struct ip_set_ext *ext,
struct ip_set_ext *mext, u32 cmdflags);
@@ -147,7 +184,8 @@ struct ip_set_type {
u8 revision_min, revision_max;
/* Create set */
- int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
+ int (*create)(struct net *net, struct ip_set *set,
+ struct nlattr *tb[], u32 flags);
/* Attribute policies */
const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
@@ -179,14 +217,45 @@ struct ip_set {
u8 revision;
/* Extensions */
u8 extensions;
+ /* Default timeout value, if enabled */
+ u32 timeout;
+ /* Element data size */
+ size_t dsize;
+ /* Offsets to extensions in elements */
+ size_t offset[IPSET_EXT_ID_MAX];
/* The type specific data */
void *data;
};
-struct ip_set_counter {
- atomic64_t bytes;
- atomic64_t packets;
-};
+static inline void
+ip_set_ext_destroy(struct ip_set *set, void *data)
+{
+ /* Check that the extension is enabled for the set and
+	 * call its destroy function for its extension part in data.
+ */
+ if (SET_WITH_COMMENT(set))
+ ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
+ ext_comment(data, set));
+}
+
+static inline int
+ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
+{
+ u32 cadt_flags = 0;
+
+ if (SET_WITH_TIMEOUT(set))
+ if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(set->timeout))))
+ return -EMSGSIZE;
+ if (SET_WITH_COUNTER(set))
+ cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
+ if (SET_WITH_COMMENT(set))
+ cadt_flags |= IPSET_FLAG_WITH_COMMENT;
+
+ if (!cadt_flags)
+ return 0;
+ return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
+}
static inline void
ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
@@ -247,13 +316,24 @@ ip_set_init_counter(struct ip_set_counter *counter,
atomic64_set(&(counter)->packets, (long long)(ext->packets));
}
+/* Netlink CB args */
+enum {
+ IPSET_CB_NET = 0,
+ IPSET_CB_DUMP,
+ IPSET_CB_INDEX,
+ IPSET_CB_ARG0,
+ IPSET_CB_ARG1,
+ IPSET_CB_ARG2,
+};
+
/* register and unregister set references */
-extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
-extern void ip_set_put_byindex(ip_set_id_t index);
-extern const char *ip_set_name_byindex(ip_set_id_t index);
-extern ip_set_id_t ip_set_nfnl_get(const char *name);
-extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
-extern void ip_set_nfnl_put(ip_set_id_t index);
+extern ip_set_id_t ip_set_get_byname(struct net *net,
+ const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
+extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get(struct net *net, const char *name);
+extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
/* API for iptables set match, and SET target */
@@ -272,6 +352,8 @@ extern void *ip_set_alloc(size_t size);
extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
+ size_t len);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext);
@@ -296,10 +378,12 @@ ip_set_eexist(int ret, u32 flags)
/* Match elements marked with nomatch */
static inline bool
-ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt)
+ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set)
{
return adt == IPSET_TEST &&
- ret == -ENOTEMPTY && ((flags >> 16) & IPSET_FLAG_NOMATCH);
+ (set->type->features & IPSET_TYPE_NOMATCH) &&
+ ((flags >> 16) & IPSET_FLAG_NOMATCH) &&
+ (ret > 0 || ret == -ENOTEMPTY);
}
/* Check the NLA_F_NET_BYTEORDER flag */
@@ -387,13 +471,40 @@ bitmap_bytes(u32 a, u32 b)
}
#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_comment.h>
+
+static inline int
+ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
+ const void *e, bool active)
+{
+ if (SET_WITH_TIMEOUT(set)) {
+ unsigned long *timeout = ext_timeout(e, set);
+
+ if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(active ? ip_set_timeout_get(timeout)
+ : *timeout)))
+ return -EMSGSIZE;
+ }
+ if (SET_WITH_COUNTER(set) &&
+ ip_set_put_counter(skb, ext_counter(e, set)))
+ return -EMSGSIZE;
+ if (SET_WITH_COMMENT(set) &&
+ ip_set_put_comment(skb, ext_comment(e, set)))
+ return -EMSGSIZE;
+ return 0;
+}
-#define IP_SET_INIT_KEXT(skb, opt, map) \
+#define IP_SET_INIT_KEXT(skb, opt, set) \
{ .bytes = (skb)->len, .packets = 1, \
- .timeout = ip_set_adt_opt_timeout(opt, map) }
+ .timeout = ip_set_adt_opt_timeout(opt, set) }
-#define IP_SET_INIT_UEXT(map) \
+#define IP_SET_INIT_UEXT(set) \
{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \
- .timeout = (map)->timeout }
+ .timeout = (set)->timeout }
+
+#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
+
+#define IPSET_CONCAT(a, b) a##b
+#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b)
#endif /*_IP_SET_H */
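
To make the new per-element extension layout concrete, here is a minimal sketch that is not part of the patch (the example_ helper is hypothetical): each enabled extension lives at set->offset[IPSET_EXT_ID_*] inside an element, and ip_set_put_extensions() emits only the extensions the set was created with.

#include <linux/netfilter/ipset/ip_set.h>

static int
example_dump_elem_extensions(struct sk_buff *skb, const struct ip_set *set,
			     const void *elem)
{
	/* 'true' requests the remaining (active) timeout rather than the
	 * stored value; -EMSGSIZE is returned when the message is full. */
	return ip_set_put_extensions(skb, set, elem, true);
}
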
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
new file mode 100644
index 000000000000..21217ea008d7
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -0,0 +1,57 @@
+#ifndef _IP_SET_COMMENT_H
+#define _IP_SET_COMMENT_H
+
+/* Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+static inline char*
+ip_set_comment_uget(struct nlattr *tb)
+{
+ return nla_data(tb);
+}
+
+static inline void
+ip_set_init_comment(struct ip_set_comment *comment,
+ const struct ip_set_ext *ext)
+{
+ size_t len = ext->comment ? strlen(ext->comment) : 0;
+
+ if (unlikely(comment->str)) {
+ kfree(comment->str);
+ comment->str = NULL;
+ }
+ if (!len)
+ return;
+ if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
+ len = IPSET_MAX_COMMENT_SIZE;
+ comment->str = kzalloc(len + 1, GFP_ATOMIC);
+ if (unlikely(!comment->str))
+ return;
+ strlcpy(comment->str, ext->comment, len + 1);
+}
+
+static inline int
+ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
+{
+ if (!comment->str)
+ return 0;
+ return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
+}
+
+static inline void
+ip_set_comment_free(struct ip_set_comment *comment)
+{
+ if (unlikely(!comment->str))
+ return;
+ kfree(comment->str);
+ comment->str = NULL;
+}
+
+#endif
+#endif
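
As a usage sketch for the new header (the wrapper below and its call ordering are illustrative, not taken from the patch), a set type drives the comment lifecycle as follows: duplicate the user string on add, emit it on dump, release it on delete.

#include <linux/netfilter/ipset/ip_set.h>

static void
example_comment_lifecycle(struct ip_set_comment *c,
			  const struct ip_set_ext *ext, struct sk_buff *skb)
{
	ip_set_init_comment(c, ext);	/* kzalloc + strlcpy, GFP_ATOMIC */
	if (ip_set_put_comment(skb, c))	/* nla_put_string when a string is set */
		pr_debug("comment attribute did not fit into the message\n");
	ip_set_comment_free(c);		/* kfree() and clear the pointer */
}
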
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 3aac04167ca7..83c2f9e0886c 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -23,8 +23,8 @@
/* Set is defined with timeout support: timeout value may be 0 */
#define IPSET_NO_TIMEOUT UINT_MAX
-#define ip_set_adt_opt_timeout(opt, map) \
-((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (map)->timeout)
+#define ip_set_adt_opt_timeout(opt, set) \
+((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
static inline unsigned int
ip_set_timeout_uget(struct nlattr *tb)
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 127d0b90604f..275505792664 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -23,6 +23,6 @@ struct ip_conntrack_stat {
};
/* call to create an explicit dependency on nf_conntrack. */
-extern void need_conntrack(void);
+void need_conntrack(void);
#endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
index f381020eee92..858d9b214053 100644
--- a/include/linux/netfilter/nf_conntrack_h323.h
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -29,13 +29,13 @@ struct nf_ct_h323_master {
struct nf_conn;
-extern int get_h225_addr(struct nf_conn *ct, unsigned char *data,
- TransportAddress *taddr,
- union nf_inet_addr *addr, __be16 *port);
-extern void nf_conntrack_h245_expect(struct nf_conn *new,
- struct nf_conntrack_expect *this);
-extern void nf_conntrack_q931_expect(struct nf_conn *new,
- struct nf_conntrack_expect *this);
+int get_h225_addr(struct nf_conn *ct, unsigned char *data,
+ TransportAddress *taddr, union nf_inet_addr *addr,
+ __be16 *port);
+void nf_conntrack_h245_expect(struct nf_conn *new,
+ struct nf_conntrack_expect *this);
+void nf_conntrack_q931_expect(struct nf_conn *new,
+ struct nf_conntrack_expect *this);
extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr,
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index 6a0664c0c451..ec2ffaf418c8 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -87,8 +87,8 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
-extern void nf_ct_gre_keymap_flush(struct net *net);
-extern void nf_nat_need_gre(void);
+void nf_ct_gre_keymap_flush(struct net *net);
+void nf_nat_need_gre(void);
#endif /* __KERNEL__ */
#endif /* _CONNTRACK_PROTO_GRE_H */
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index ba7f571a2b1c..d5af3c27fb7d 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -107,85 +107,93 @@ enum sdp_header_types {
SDP_HDR_MEDIA,
};
-extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen);
-extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb,
- unsigned int protoff, s16 off);
-extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- struct nf_conntrack_expect *exp,
- unsigned int matchoff,
- unsigned int matchlen);
-extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- unsigned int sdpoff,
- enum sdp_header_types type,
- enum sdp_header_types term,
- const union nf_inet_addr *addr);
-extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- unsigned int matchoff,
- unsigned int matchlen,
- u_int16_t port);
-extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- unsigned int sdpoff,
- const union nf_inet_addr *addr);
-extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
- unsigned int protoff,
- unsigned int dataoff,
- const char **dptr,
- unsigned int *datalen,
- struct nf_conntrack_expect *rtp_exp,
- struct nf_conntrack_expect *rtcp_exp,
- unsigned int mediaoff,
- unsigned int medialen,
- union nf_inet_addr *rtp_addr);
-
-extern int ct_sip_parse_request(const struct nf_conn *ct,
- const char *dptr, unsigned int datalen,
- unsigned int *matchoff, unsigned int *matchlen,
- union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
- unsigned int dataoff, unsigned int datalen,
- enum sip_header_types type,
- unsigned int *matchoff, unsigned int *matchlen);
-extern int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
- unsigned int *dataoff, unsigned int datalen,
- enum sip_header_types type, int *in_header,
- unsigned int *matchoff, unsigned int *matchlen,
- union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
- unsigned int dataoff, unsigned int datalen,
- const char *name,
- unsigned int *matchoff, unsigned int *matchlen,
- union nf_inet_addr *addr, bool delim);
-extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
- unsigned int off, unsigned int datalen,
- const char *name,
- unsigned int *matchoff, unsigned int *matchen,
- unsigned int *val);
-
-extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
- unsigned int dataoff, unsigned int datalen,
+struct nf_nat_sip_hooks {
+ unsigned int (*msg)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen);
+
+ void (*seq_adjust)(struct sk_buff *skb,
+ unsigned int protoff, s16 off);
+
+ unsigned int (*expect)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ struct nf_conntrack_expect *exp,
+ unsigned int matchoff,
+ unsigned int matchlen);
+
+ unsigned int (*sdp_addr)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int sdpoff,
enum sdp_header_types type,
enum sdp_header_types term,
- unsigned int *matchoff, unsigned int *matchlen);
+ const union nf_inet_addr *addr);
+
+ unsigned int (*sdp_port)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ u_int16_t port);
+
+ unsigned int (*sdp_session)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int sdpoff,
+ const union nf_inet_addr *addr);
+
+ unsigned int (*sdp_media)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ struct nf_conntrack_expect *rtp_exp,
+ struct nf_conntrack_expect *rtcp_exp,
+ unsigned int mediaoff,
+ unsigned int medialen,
+ union nf_inet_addr *rtp_addr);
+};
+extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+
+int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
+ unsigned int datalen, unsigned int *matchoff,
+ unsigned int *matchlen, union nf_inet_addr *addr,
+ __be16 *port);
+int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
+ unsigned int dataoff, unsigned int datalen,
+ enum sip_header_types type, unsigned int *matchoff,
+ unsigned int *matchlen);
+int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
+ unsigned int *dataoff, unsigned int datalen,
+ enum sip_header_types type, int *in_header,
+ unsigned int *matchoff, unsigned int *matchlen,
+ union nf_inet_addr *addr, __be16 *port);
+int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
+ unsigned int dataoff, unsigned int datalen,
+ const char *name, unsigned int *matchoff,
+ unsigned int *matchlen, union nf_inet_addr *addr,
+ bool delim);
+int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
+ unsigned int off, unsigned int datalen,
+ const char *name, unsigned int *matchoff,
+ unsigned int *matchen, unsigned int *val);
+
+int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
+ unsigned int dataoff, unsigned int datalen,
+ enum sdp_header_types type,
+ enum sdp_header_types term,
+ unsigned int *matchoff, unsigned int *matchlen);
#endif /* __KERNEL__ */
#endif /* __NF_CONNTRACK_SIP_H__ */
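
For context on the consolidation above, a hedged sketch of how conntrack-side code is expected to pick up the single RCU-published hook structure instead of the old per-function pointers (the wrapper name is made up; error handling is trimmed):

#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_sip.h>

static unsigned int
example_mangle_sip_msg(struct sk_buff *skb, unsigned int protoff,
		       unsigned int dataoff, const char **dptr,
		       unsigned int *datalen)
{
	const struct nf_nat_sip_hooks *hooks;

	hooks = rcu_dereference(nf_nat_sip_hooks);
	if (!hooks)
		return NF_ACCEPT;	/* NAT helper not loaded */
	return hooks->msg(skb, protoff, dataoff, dptr, datalen);
}
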
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index cadb7402d7a7..28c74367e900 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -14,6 +14,9 @@ struct nfnl_callback {
int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[]);
+ int (*call_batch)(struct sock *nl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[]);
const struct nla_policy *policy; /* netlink attribute policy */
const u_int16_t attr_count; /* number of nlattr's */
};
@@ -23,22 +26,24 @@ struct nfnetlink_subsystem {
__u8 subsys_id; /* nfnetlink subsystem ID */
__u8 cb_count; /* number of callbacks */
const struct nfnl_callback *cb; /* callback for individual types */
+ int (*commit)(struct sk_buff *skb);
+ int (*abort)(struct sk_buff *skb);
};
-extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
-extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
-extern int nfnetlink_has_listeners(struct net *net, unsigned int group);
-extern struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
- u32 dst_portid, gfp_t gfp_mask);
-extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
- unsigned int group, int echo, gfp_t flags);
-extern int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net,
- u32 portid, int flags);
+int nfnetlink_has_listeners(struct net *net, unsigned int group);
+struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
+ u32 dst_portid, gfp_t gfp_mask);
+int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
+ unsigned int group, int echo, gfp_t flags);
+int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
+ int flags);
-extern void nfnl_lock(__u8 subsys_id);
-extern void nfnl_unlock(__u8 subsys_id);
+void nfnl_lock(__u8 subsys_id);
+void nfnl_unlock(__u8 subsys_id);
#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
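
A minimal sketch of a subsystem opting into the new batch interface (every example_* name and the empty callback table are illustrative): nfnetlink invokes ->call_batch for each message of a batch and then exactly one of ->commit or ->abort for the whole transaction.

#include <linux/netfilter/nfnetlink.h>

static int example_commit(struct sk_buff *skb) { return 0; }
static int example_abort(struct sk_buff *skb) { return 0; }

static const struct nfnetlink_subsystem example_subsys = {
	.name      = "example",
	.subsys_id = NFNL_SUBSYS_NONE,	/* placeholder id for the sketch */
	.cb_count  = 0,
	.cb        = NULL,
	.commit    = example_commit,
	.abort     = example_abort,
};
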
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index bb4bbc9b7a18..b2e85e59f760 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -6,8 +6,8 @@
struct nf_acct;
-extern struct nf_acct *nfnl_acct_find_get(const char *filter_name);
-extern void nfnl_acct_put(struct nf_acct *acct);
-extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
+struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+void nfnl_acct_put(struct nf_acct *acct);
+void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
#endif /* _NFNL_ACCT_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index dd49566315c6..a3e215bb0241 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -229,50 +229,48 @@ struct xt_table_info {
#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
+ nr_cpu_ids * sizeof(char *))
-extern int xt_register_target(struct xt_target *target);
-extern void xt_unregister_target(struct xt_target *target);
-extern int xt_register_targets(struct xt_target *target, unsigned int n);
-extern void xt_unregister_targets(struct xt_target *target, unsigned int n);
-
-extern int xt_register_match(struct xt_match *target);
-extern void xt_unregister_match(struct xt_match *target);
-extern int xt_register_matches(struct xt_match *match, unsigned int n);
-extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
-
-extern int xt_check_match(struct xt_mtchk_param *,
- unsigned int size, u_int8_t proto, bool inv_proto);
-extern int xt_check_target(struct xt_tgchk_param *,
- unsigned int size, u_int8_t proto, bool inv_proto);
-
-extern struct xt_table *xt_register_table(struct net *net,
- const struct xt_table *table,
- struct xt_table_info *bootstrap,
- struct xt_table_info *newinfo);
-extern void *xt_unregister_table(struct xt_table *table);
-
-extern struct xt_table_info *xt_replace_table(struct xt_table *table,
- unsigned int num_counters,
- struct xt_table_info *newinfo,
- int *error);
-
-extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
-extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
-extern struct xt_match *xt_request_find_match(u8 af, const char *name,
- u8 revision);
-extern struct xt_target *xt_request_find_target(u8 af, const char *name,
- u8 revision);
-extern int xt_find_revision(u8 af, const char *name, u8 revision,
- int target, int *err);
-
-extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
- const char *name);
-extern void xt_table_unlock(struct xt_table *t);
-
-extern int xt_proto_init(struct net *net, u_int8_t af);
-extern void xt_proto_fini(struct net *net, u_int8_t af);
-
-extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
-extern void xt_free_table_info(struct xt_table_info *info);
+int xt_register_target(struct xt_target *target);
+void xt_unregister_target(struct xt_target *target);
+int xt_register_targets(struct xt_target *target, unsigned int n);
+void xt_unregister_targets(struct xt_target *target, unsigned int n);
+
+int xt_register_match(struct xt_match *target);
+void xt_unregister_match(struct xt_match *target);
+int xt_register_matches(struct xt_match *match, unsigned int n);
+void xt_unregister_matches(struct xt_match *match, unsigned int n);
+
+int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+ bool inv_proto);
+int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+ bool inv_proto);
+
+struct xt_table *xt_register_table(struct net *net,
+ const struct xt_table *table,
+ struct xt_table_info *bootstrap,
+ struct xt_table_info *newinfo);
+void *xt_unregister_table(struct xt_table *table);
+
+struct xt_table_info *xt_replace_table(struct xt_table *table,
+ unsigned int num_counters,
+ struct xt_table_info *newinfo,
+ int *error);
+
+struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
+struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
+int xt_find_revision(u8 af, const char *name, u8 revision, int target,
+ int *err);
+
+struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
+ const char *name);
+void xt_table_unlock(struct xt_table *t);
+
+int xt_proto_init(struct net *net, u_int8_t af);
+void xt_proto_fini(struct net *net, u_int8_t af);
+
+struct xt_table_info *xt_alloc_table_info(unsigned int size);
+void xt_free_table_info(struct xt_table_info *info);
/**
* xt_recseq - recursive seqcount for netfilter use
@@ -353,8 +351,8 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
return ret;
}
-extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
-extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
+struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
+void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
#ifdef CONFIG_COMPAT
#include <net/compat.h>
@@ -414,25 +412,25 @@ struct _compat_xt_align {
#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
-extern void xt_compat_lock(u_int8_t af);
-extern void xt_compat_unlock(u_int8_t af);
-
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
-extern void xt_compat_flush_offsets(u_int8_t af);
-extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
-extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
-
-extern int xt_compat_match_offset(const struct xt_match *match);
-extern int xt_compat_match_from_user(struct xt_entry_match *m,
- void **dstptr, unsigned int *size);
-extern int xt_compat_match_to_user(const struct xt_entry_match *m,
- void __user **dstptr, unsigned int *size);
-
-extern int xt_compat_target_offset(const struct xt_target *target);
-extern void xt_compat_target_from_user(struct xt_entry_target *t,
- void **dstptr, unsigned int *size);
-extern int xt_compat_target_to_user(const struct xt_entry_target *t,
- void __user **dstptr, unsigned int *size);
+void xt_compat_lock(u_int8_t af);
+void xt_compat_unlock(u_int8_t af);
+
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
+void xt_compat_flush_offsets(u_int8_t af);
+void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+
+int xt_compat_match_offset(const struct xt_match *match);
+int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+ unsigned int *size);
+int xt_compat_match_to_user(const struct xt_entry_match *m,
+ void __user **dstptr, unsigned int *size);
+
+int xt_compat_target_offset(const struct xt_target *target);
+void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ unsigned int *size);
+int xt_compat_target_to_user(const struct xt_entry_target *t,
+ void __user **dstptr, unsigned int *size);
#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index dfb4d9e52bcb..8ab1c278b66d 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -25,7 +25,7 @@ enum nf_br_hook_priorities {
#define BRNF_PPPoE 0x20
/* Only used in br_forward.c */
-extern int nf_bridge_copy_header(struct sk_buff *skb);
+int nf_bridge_copy_header(struct sk_buff *skb);
static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
{
if (skb->nf_bridge &&
@@ -53,7 +53,7 @@ static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
return 0;
}
-extern int br_handle_frame_finish(struct sk_buff *skb);
+int br_handle_frame_finish(struct sk_buff *skb);
/* Only used in br_device.c */
static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index dfaf116b3e81..6e4591bb54d4 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,7 @@
#include <uapi/linux/netfilter_ipv4.h>
-extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
-extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
+int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
+__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 2d4df6ce043e..64dad1cc1a4b 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -11,12 +11,12 @@
#ifdef CONFIG_NETFILTER
-extern int ip6_route_me_harder(struct sk_buff *skb);
-extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
-extern int ipv6_netfilter_init(void);
-extern void ipv6_netfilter_fini(void);
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
/*
* Hook functions for ipv6 to allow xt_* modules to be built-in even
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index f3c7c24bec1c..fbfdb9d8d3a7 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -24,7 +24,8 @@ struct netpoll {
struct net_device *dev;
char dev_name[IFNAMSIZ];
const char *name;
- void (*rx_hook)(struct netpoll *, int, char *, int);
+ void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
+ int offset, int len);
union inet_addr local_ip, remote_ip;
bool ipv6;
@@ -41,7 +42,7 @@ struct netpoll_info {
unsigned long rx_flags;
spinlock_t rx_lock;
struct semaphore dev_lock;
- struct list_head rx_np; /* netpolls that registered an rx_hook */
+ struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
struct sk_buff_head txq;
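
A sketch of a consumer adapting to the renamed callback; the interpretation of 'source' as the sending UDP port follows the old rx_hook convention and should be treated as an assumption. The hook now receives the sk_buff itself plus the payload offset and length instead of a copied character buffer.

#include <linux/netpoll.h>

static void example_rx_skb_hook(struct netpoll *np, int source,
				struct sk_buff *skb, int offset, int len)
{
	/* the payload starts at skb->data + offset and is 'len' bytes long */
	pr_debug("netpoll: %d bytes from port %d on %s\n",
		 len, source, np->dev_name);
}
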
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index e36dee52f224..12c2cb947df5 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -118,6 +118,9 @@ Needs to be updated if more operations are defined in future.*/
#define FIRST_NFS4_OP OP_ACCESS
#define LAST_NFS4_OP OP_RECLAIM_COMPLETE
+#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER
+#define LAST_NFS41_OP OP_RECLAIM_COMPLETE
+#define LAST_NFS42_OP OP_RECLAIM_COMPLETE
enum nfsstat4 {
NFS4_OK = 0,
@@ -395,7 +398,9 @@ enum lock_type4 {
#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
-#define FATTR4_WORD2_SECURITY_LABEL (1UL << 17)
+#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
+#define FATTR4_WORD2_CHANGE_SECURITY_LABEL \
+ (1UL << 17)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
@@ -408,16 +413,6 @@ enum lock_type4 {
#define NFS4_VERSION 4
#define NFS4_MINOR_VERSION 0
-#if defined(CONFIG_NFS_V4_2)
-#define NFS4_MAX_MINOR_VERSION 2
-#else
-#if defined(CONFIG_NFS_V4_1)
-#define NFS4_MAX_MINOR_VERSION 1
-#else
-#define NFS4_MAX_MINOR_VERSION 0
-#endif /* CONFIG_NFS_V4_1 */
-#endif /* CONFIG_NFS_V4_2 */
-
#define NFS4_DEBUG 1
/* Index of predefined Linux client operations */
@@ -460,6 +455,7 @@ enum {
NFSPROC4_CLNT_FS_LOCATIONS,
NFSPROC4_CLNT_RELEASE_LOCKOWNER,
NFSPROC4_CLNT_SECINFO,
+ NFSPROC4_CLNT_FSID_PRESENT,
/* nfs41 */
NFSPROC4_CLNT_EXCHANGE_ID,
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 7125cef74164..48997374eaf0 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -269,9 +269,13 @@ static inline int NFS_STALE(const struct inode *inode)
return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
}
-static inline int NFS_FSCACHE(const struct inode *inode)
+static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode)
{
- return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
+#ifdef CONFIG_NFS_FSCACHE
+ return NFS_I(inode)->fscache;
+#else
+ return NULL;
+#endif
}
static inline __u64 NFS_FILEID(const struct inode *inode)
@@ -503,27 +507,10 @@ extern int nfs_mountpoint_expiry_timeout;
extern void nfs_release_automount_timer(void);
/*
- * linux/fs/nfs/nfs4proc.c
- */
-#ifdef CONFIG_NFS_V4_SECURITY_LABEL
-extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
-static inline void nfs4_label_free(struct nfs4_label *label)
-{
- if (label) {
- kfree(label->label);
- kfree(label);
- }
- return;
-}
-#else
-static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
-static inline void nfs4_label_free(void *label) {}
-#endif
-
-/*
* linux/fs/nfs/unlink.c
*/
extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
+extern void nfs_wait_on_sillyrename(struct dentry *dentry);
extern void nfs_block_sillyrename(struct dentry *dentry);
extern void nfs_unblock_sillyrename(struct dentry *dentry);
extern int nfs_sillyrename(struct inode *dir, struct dentry *dentry);
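
To make the accessor change concrete, a brief sketch (hypothetical helper name): code that used to test the NFS_INO_FSCACHE flag now checks the cookie pointer, which is NULL both when fscache support is compiled out and when no cookie is attached.

#include <linux/nfs_fs.h>

static bool example_inode_uses_fscache(struct inode *inode)
{
	return nfs_i_fscache(inode) != NULL;
}
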
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index d2212432c456..1150ea41b626 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -41,6 +41,7 @@ struct nfs_client {
#define NFS_CS_DISCRTRY 1 /* - disconnect on RPC retry */
#define NFS_CS_MIGRATION 2 /* - transparent state migr */
#define NFS_CS_INFINITE_SLOTS 3 /* - don't limit TCP slots */
+#define NFS_CS_NO_RETRANS_TIMEOUT 4 /* - Disable retransmit timeouts */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -56,6 +57,7 @@ struct nfs_client {
struct rpc_cred *cl_machine_cred;
#if IS_ENABLED(CONFIG_NFS_V4)
+ struct list_head cl_ds_clients; /* auth flavor data servers */
u64 cl_clientid; /* constant */
nfs4_verifier cl_confirm; /* Clientid verifier */
unsigned long cl_state;
@@ -77,6 +79,10 @@ struct nfs_client {
char cl_ipaddr[48];
u32 cl_cb_ident; /* v4.0 callback identifier */
const struct nfs4_minor_version_ops *cl_mvops;
+ unsigned long cl_mig_gen;
+
+ /* NFSv4.0 transport blocking */
+ struct nfs4_slot_table *cl_slot_tbl;
/* The sequence id to use for the next CREATE_SESSION */
u32 cl_seqid;
@@ -87,6 +93,15 @@ struct nfs_client {
struct nfs41_server_owner *cl_serverowner;
struct nfs41_server_scope *cl_serverscope;
struct nfs41_impl_id *cl_implid;
+ /* nfs 4.1+ state protection modes: */
+ unsigned long cl_sp4_flags;
+#define NFS_SP4_MACH_CRED_MINIMAL 1 /* Minimal sp4_mach_cred - state ops
+ * must use machine cred */
+#define NFS_SP4_MACH_CRED_CLEANUP 2 /* CLOSE and LOCKU */
+#define NFS_SP4_MACH_CRED_SECINFO 3 /* SECINFO and SECINFO_NO_NAME */
+#define NFS_SP4_MACH_CRED_STATEID 4 /* TEST_STATEID and FREE_STATEID */
+#define NFS_SP4_MACH_CRED_WRITE 5 /* WRITE */
+#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */
#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_FSCACHE
@@ -134,7 +149,9 @@ struct nfs_server {
__u64 maxfilesize; /* maximum file size */
struct timespec time_delta; /* smallest time granularity */
unsigned long mount_time; /* when this fs was mounted */
+ struct super_block *super; /* VFS super block */
dev_t s_dev; /* superblock dev numbers */
+ struct nfs_auth_info auth_info; /* parsed auth flavors */
#ifdef CONFIG_NFS_FSCACHE
struct nfs_fscache_key *fscache_key; /* unique key for superblock */
@@ -174,6 +191,12 @@ struct nfs_server {
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
+
+ unsigned long mig_gen;
+ unsigned long mig_status;
+#define NFS_MIG_IN_TRANSITION (1)
+#define NFS_MIG_FAILED (2)
+
void (*destroy)(struct nfs_server *);
	atomic_t active; /* Keep track of any activity to this server */
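
A short sketch (hypothetical helper, assuming an NFSv4-enabled build) of how the new SP4 machine-credential flags are meant to be consumed: they are bit numbers within cl_sp4_flags, so callers test them with test_bit() before choosing the credential for a compound.

#include <linux/nfs_fs_sb.h>

static bool example_close_needs_machine_cred(const struct nfs_client *clp)
{
	return test_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
}
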
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 8651574a305b..3ccfcecf8999 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -591,6 +591,13 @@ struct nfs_renameres {
struct nfs_fattr *new_fattr;
};
+/* parsed sec= options */
+#define NFS_AUTH_INFO_MAX_FLAVORS 12 /* see fs/nfs/super.c */
+struct nfs_auth_info {
+ unsigned int flavor_len;
+ rpc_authflavor_t flavors[NFS_AUTH_INFO_MAX_FLAVORS];
+};
+
/*
* Argument struct for decode_entry function
*/
@@ -1053,14 +1060,18 @@ struct nfs4_fs_locations {
struct nfs4_fs_locations_arg {
struct nfs4_sequence_args seq_args;
const struct nfs_fh *dir_fh;
+ const struct nfs_fh *fh;
const struct qstr *name;
struct page *page;
const u32 *bitmask;
+ clientid4 clientid;
+ unsigned char migration:1, renew:1;
};
struct nfs4_fs_locations_res {
struct nfs4_sequence_res seq_res;
struct nfs4_fs_locations *fs_locations;
+ unsigned char migration:1, renew:1;
};
struct nfs4_secinfo4 {
@@ -1084,6 +1095,19 @@ struct nfs4_secinfo_res {
struct nfs4_secinfo_flavors *flavors;
};
+struct nfs4_fsid_present_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *fh;
+ clientid4 clientid;
+ unsigned char renew:1;
+};
+
+struct nfs4_fsid_present_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_fh *fh;
+ unsigned char renew:1;
+};
+
#endif /* CONFIG_NFS_V4 */
struct nfstime4 {
@@ -1107,6 +1131,23 @@ struct pnfs_ds_commit_info {
struct pnfs_commit_bucket *buckets;
};
+#define NFS4_OP_MAP_NUM_LONGS \
+ DIV_ROUND_UP(LAST_NFS4_OP, 8 * sizeof(unsigned long))
+#define NFS4_OP_MAP_NUM_WORDS \
+ (NFS4_OP_MAP_NUM_LONGS * sizeof(unsigned long) / sizeof(u32))
+struct nfs4_op_map {
+ union {
+ unsigned long longs[NFS4_OP_MAP_NUM_LONGS];
+ u32 words[NFS4_OP_MAP_NUM_WORDS];
+ } u;
+};
+
+struct nfs41_state_protection {
+ u32 how;
+ struct nfs4_op_map enforce;
+ struct nfs4_op_map allow;
+};
+
#define NFS4_EXCHANGE_ID_LEN (48)
struct nfs41_exchange_id_args {
struct nfs_client *client;
@@ -1114,6 +1155,7 @@ struct nfs41_exchange_id_args {
unsigned int id_len;
char id[NFS4_EXCHANGE_ID_LEN];
u32 flags;
+ struct nfs41_state_protection state_protect;
};
struct nfs41_server_owner {
@@ -1146,6 +1188,7 @@ struct nfs41_exchange_id_res {
struct nfs41_server_owner *server_owner;
struct nfs41_server_scope *server_scope;
struct nfs41_impl_id *impl_id;
+ struct nfs41_state_protection state_protect;
};
struct nfs41_create_session_args {
@@ -1419,12 +1462,12 @@ struct nfs_rpc_ops {
void (*read_setup) (struct nfs_read_data *, struct rpc_message *);
void (*read_pageio_init)(struct nfs_pageio_descriptor *, struct inode *,
const struct nfs_pgio_completion_ops *);
- void (*read_rpc_prepare)(struct rpc_task *, struct nfs_read_data *);
+ int (*read_rpc_prepare)(struct rpc_task *, struct nfs_read_data *);
int (*read_done) (struct rpc_task *, struct nfs_read_data *);
void (*write_setup) (struct nfs_write_data *, struct rpc_message *);
void (*write_pageio_init)(struct nfs_pageio_descriptor *, struct inode *, int,
const struct nfs_pgio_completion_ops *);
- void (*write_rpc_prepare)(struct rpc_task *, struct nfs_write_data *);
+ int (*write_rpc_prepare)(struct rpc_task *, struct nfs_write_data *);
int (*write_done) (struct rpc_task *, struct nfs_write_data *);
void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *);
void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *);
@@ -1436,13 +1479,14 @@ struct nfs_rpc_ops {
struct inode * (*open_context) (struct inode *dir,
struct nfs_open_context *ctx,
int open_flags,
- struct iattr *iattr);
+ struct iattr *iattr,
+ int *);
int (*have_delegation)(struct inode *, fmode_t);
int (*return_delegation)(struct inode *);
struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
struct nfs_client *
(*init_client) (struct nfs_client *, const struct rpc_timeout *,
- const char *, rpc_authflavor_t);
+ const char *);
void (*free_client) (struct nfs_client *);
struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *);
struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
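
As an illustration of the nfs4_op_map sizing above (the helper is hypothetical and SP4_MACH_CRED is assumed from the NFSv4.1 protocol definitions): the map is a bitmap indexed by operation number, so asking the server to enforce the machine credential for CLOSE is a single bit set in the 'enforce' map.

#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>

static void example_enforce_machine_cred_for_close(struct nfs41_state_protection *sp)
{
	sp->how = SP4_MACH_CRED;
	__set_bit(OP_CLOSE, sp->enforce.u.longs);
}
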
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index f451c8d6e231..26ebcf41c213 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1,6 +1,6 @@
/*
* Definitions for the NVM Express interface
- * Copyright (c) 2011, Intel Corporation.
+ * Copyright (c) 2011-2013, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -19,7 +19,10 @@
#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H
-#include <linux/types.h>
+#include <uapi/linux/nvme.h>
+#include <linux/pci.h>
+#include <linux/miscdevice.h>
+#include <linux/kref.h>
struct nvme_bar {
__u64 cap; /* Controller Capabilities */
@@ -50,6 +53,7 @@ enum {
NVME_CC_SHN_NONE = 0 << 14,
NVME_CC_SHN_NORMAL = 1 << 14,
NVME_CC_SHN_ABRUPT = 2 << 14,
+ NVME_CC_SHN_MASK = 3 << 14,
NVME_CC_IOSQES = 6 << 16,
NVME_CC_IOCQES = 4 << 20,
NVME_CSTS_RDY = 1 << 0,
@@ -57,462 +61,11 @@ enum {
NVME_CSTS_SHST_NORMAL = 0 << 2,
NVME_CSTS_SHST_OCCUR = 1 << 2,
NVME_CSTS_SHST_CMPLT = 2 << 2,
-};
-
-struct nvme_id_power_state {
- __le16 max_power; /* centiwatts */
- __u16 rsvd2;
- __le32 entry_lat; /* microseconds */
- __le32 exit_lat; /* microseconds */
- __u8 read_tput;
- __u8 read_lat;
- __u8 write_tput;
- __u8 write_lat;
- __u8 rsvd16[16];
+ NVME_CSTS_SHST_MASK = 3 << 2,
};
#define NVME_VS(major, minor) (major << 16 | minor)
-struct nvme_id_ctrl {
- __le16 vid;
- __le16 ssvid;
- char sn[20];
- char mn[40];
- char fr[8];
- __u8 rab;
- __u8 ieee[3];
- __u8 mic;
- __u8 mdts;
- __u8 rsvd78[178];
- __le16 oacs;
- __u8 acl;
- __u8 aerl;
- __u8 frmw;
- __u8 lpa;
- __u8 elpe;
- __u8 npss;
- __u8 rsvd264[248];
- __u8 sqes;
- __u8 cqes;
- __u8 rsvd514[2];
- __le32 nn;
- __le16 oncs;
- __le16 fuses;
- __u8 fna;
- __u8 vwc;
- __le16 awun;
- __le16 awupf;
- __u8 rsvd530[1518];
- struct nvme_id_power_state psd[32];
- __u8 vs[1024];
-};
-
-enum {
- NVME_CTRL_ONCS_COMPARE = 1 << 0,
- NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
- NVME_CTRL_ONCS_DSM = 1 << 2,
-};
-
-struct nvme_lbaf {
- __le16 ms;
- __u8 ds;
- __u8 rp;
-};
-
-struct nvme_id_ns {
- __le64 nsze;
- __le64 ncap;
- __le64 nuse;
- __u8 nsfeat;
- __u8 nlbaf;
- __u8 flbas;
- __u8 mc;
- __u8 dpc;
- __u8 dps;
- __u8 rsvd30[98];
- struct nvme_lbaf lbaf[16];
- __u8 rsvd192[192];
- __u8 vs[3712];
-};
-
-enum {
- NVME_NS_FEAT_THIN = 1 << 0,
- NVME_LBAF_RP_BEST = 0,
- NVME_LBAF_RP_BETTER = 1,
- NVME_LBAF_RP_GOOD = 2,
- NVME_LBAF_RP_DEGRADED = 3,
-};
-
-struct nvme_smart_log {
- __u8 critical_warning;
- __u8 temperature[2];
- __u8 avail_spare;
- __u8 spare_thresh;
- __u8 percent_used;
- __u8 rsvd6[26];
- __u8 data_units_read[16];
- __u8 data_units_written[16];
- __u8 host_reads[16];
- __u8 host_writes[16];
- __u8 ctrl_busy_time[16];
- __u8 power_cycles[16];
- __u8 power_on_hours[16];
- __u8 unsafe_shutdowns[16];
- __u8 media_errors[16];
- __u8 num_err_log_entries[16];
- __u8 rsvd192[320];
-};
-
-enum {
- NVME_SMART_CRIT_SPARE = 1 << 0,
- NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
- NVME_SMART_CRIT_RELIABILITY = 1 << 2,
- NVME_SMART_CRIT_MEDIA = 1 << 3,
- NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
-};
-
-struct nvme_lba_range_type {
- __u8 type;
- __u8 attributes;
- __u8 rsvd2[14];
- __u64 slba;
- __u64 nlb;
- __u8 guid[16];
- __u8 rsvd48[16];
-};
-
-enum {
- NVME_LBART_TYPE_FS = 0x01,
- NVME_LBART_TYPE_RAID = 0x02,
- NVME_LBART_TYPE_CACHE = 0x03,
- NVME_LBART_TYPE_SWAP = 0x04,
-
- NVME_LBART_ATTRIB_TEMP = 1 << 0,
- NVME_LBART_ATTRIB_HIDE = 1 << 1,
-};
-
-/* I/O commands */
-
-enum nvme_opcode {
- nvme_cmd_flush = 0x00,
- nvme_cmd_write = 0x01,
- nvme_cmd_read = 0x02,
- nvme_cmd_write_uncor = 0x04,
- nvme_cmd_compare = 0x05,
- nvme_cmd_dsm = 0x09,
-};
-
-struct nvme_common_command {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __le32 cdw2[2];
- __le64 metadata;
- __le64 prp1;
- __le64 prp2;
- __le32 cdw10[6];
-};
-
-struct nvme_rw_command {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd2;
- __le64 metadata;
- __le64 prp1;
- __le64 prp2;
- __le64 slba;
- __le16 length;
- __le16 control;
- __le32 dsmgmt;
- __le32 reftag;
- __le16 apptag;
- __le16 appmask;
-};
-
-enum {
- NVME_RW_LR = 1 << 15,
- NVME_RW_FUA = 1 << 14,
- NVME_RW_DSM_FREQ_UNSPEC = 0,
- NVME_RW_DSM_FREQ_TYPICAL = 1,
- NVME_RW_DSM_FREQ_RARE = 2,
- NVME_RW_DSM_FREQ_READS = 3,
- NVME_RW_DSM_FREQ_WRITES = 4,
- NVME_RW_DSM_FREQ_RW = 5,
- NVME_RW_DSM_FREQ_ONCE = 6,
- NVME_RW_DSM_FREQ_PREFETCH = 7,
- NVME_RW_DSM_FREQ_TEMP = 8,
- NVME_RW_DSM_LATENCY_NONE = 0 << 4,
- NVME_RW_DSM_LATENCY_IDLE = 1 << 4,
- NVME_RW_DSM_LATENCY_NORM = 2 << 4,
- NVME_RW_DSM_LATENCY_LOW = 3 << 4,
- NVME_RW_DSM_SEQ_REQ = 1 << 6,
- NVME_RW_DSM_COMPRESSED = 1 << 7,
-};
-
-struct nvme_dsm_cmd {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd2[2];
- __le64 prp1;
- __le64 prp2;
- __le32 nr;
- __le32 attributes;
- __u32 rsvd12[4];
-};
-
-enum {
- NVME_DSMGMT_IDR = 1 << 0,
- NVME_DSMGMT_IDW = 1 << 1,
- NVME_DSMGMT_AD = 1 << 2,
-};
-
-struct nvme_dsm_range {
- __le32 cattr;
- __le32 nlb;
- __le64 slba;
-};
-
-/* Admin commands */
-
-enum nvme_admin_opcode {
- nvme_admin_delete_sq = 0x00,
- nvme_admin_create_sq = 0x01,
- nvme_admin_get_log_page = 0x02,
- nvme_admin_delete_cq = 0x04,
- nvme_admin_create_cq = 0x05,
- nvme_admin_identify = 0x06,
- nvme_admin_abort_cmd = 0x08,
- nvme_admin_set_features = 0x09,
- nvme_admin_get_features = 0x0a,
- nvme_admin_async_event = 0x0c,
- nvme_admin_activate_fw = 0x10,
- nvme_admin_download_fw = 0x11,
- nvme_admin_format_nvm = 0x80,
- nvme_admin_security_send = 0x81,
- nvme_admin_security_recv = 0x82,
-};
-
-enum {
- NVME_QUEUE_PHYS_CONTIG = (1 << 0),
- NVME_CQ_IRQ_ENABLED = (1 << 1),
- NVME_SQ_PRIO_URGENT = (0 << 1),
- NVME_SQ_PRIO_HIGH = (1 << 1),
- NVME_SQ_PRIO_MEDIUM = (2 << 1),
- NVME_SQ_PRIO_LOW = (3 << 1),
- NVME_FEAT_ARBITRATION = 0x01,
- NVME_FEAT_POWER_MGMT = 0x02,
- NVME_FEAT_LBA_RANGE = 0x03,
- NVME_FEAT_TEMP_THRESH = 0x04,
- NVME_FEAT_ERR_RECOVERY = 0x05,
- NVME_FEAT_VOLATILE_WC = 0x06,
- NVME_FEAT_NUM_QUEUES = 0x07,
- NVME_FEAT_IRQ_COALESCE = 0x08,
- NVME_FEAT_IRQ_CONFIG = 0x09,
- NVME_FEAT_WRITE_ATOMIC = 0x0a,
- NVME_FEAT_ASYNC_EVENT = 0x0b,
- NVME_FEAT_SW_PROGRESS = 0x0c,
- NVME_FWACT_REPL = (0 << 3),
- NVME_FWACT_REPL_ACTV = (1 << 3),
- NVME_FWACT_ACTV = (2 << 3),
-};
-
-struct nvme_identify {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd2[2];
- __le64 prp1;
- __le64 prp2;
- __le32 cns;
- __u32 rsvd11[5];
-};
-
-struct nvme_features {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd2[2];
- __le64 prp1;
- __le64 prp2;
- __le32 fid;
- __le32 dword11;
- __u32 rsvd12[4];
-};
-
-struct nvme_create_cq {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __u32 rsvd1[5];
- __le64 prp1;
- __u64 rsvd8;
- __le16 cqid;
- __le16 qsize;
- __le16 cq_flags;
- __le16 irq_vector;
- __u32 rsvd12[4];
-};
-
-struct nvme_create_sq {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __u32 rsvd1[5];
- __le64 prp1;
- __u64 rsvd8;
- __le16 sqid;
- __le16 qsize;
- __le16 sq_flags;
- __le16 cqid;
- __u32 rsvd12[4];
-};
-
-struct nvme_delete_queue {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __u32 rsvd1[9];
- __le16 qid;
- __u16 rsvd10;
- __u32 rsvd11[5];
-};
-
-struct nvme_download_firmware {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __u32 rsvd1[5];
- __le64 prp1;
- __le64 prp2;
- __le32 numd;
- __le32 offset;
- __u32 rsvd12[4];
-};
-
-struct nvme_format_cmd {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd2[4];
- __le32 cdw10;
- __u32 rsvd11[5];
-};
-
-struct nvme_command {
- union {
- struct nvme_common_command common;
- struct nvme_rw_command rw;
- struct nvme_identify identify;
- struct nvme_features features;
- struct nvme_create_cq create_cq;
- struct nvme_create_sq create_sq;
- struct nvme_delete_queue delete_queue;
- struct nvme_download_firmware dlfw;
- struct nvme_format_cmd format;
- struct nvme_dsm_cmd dsm;
- };
-};
-
-enum {
- NVME_SC_SUCCESS = 0x0,
- NVME_SC_INVALID_OPCODE = 0x1,
- NVME_SC_INVALID_FIELD = 0x2,
- NVME_SC_CMDID_CONFLICT = 0x3,
- NVME_SC_DATA_XFER_ERROR = 0x4,
- NVME_SC_POWER_LOSS = 0x5,
- NVME_SC_INTERNAL = 0x6,
- NVME_SC_ABORT_REQ = 0x7,
- NVME_SC_ABORT_QUEUE = 0x8,
- NVME_SC_FUSED_FAIL = 0x9,
- NVME_SC_FUSED_MISSING = 0xa,
- NVME_SC_INVALID_NS = 0xb,
- NVME_SC_CMD_SEQ_ERROR = 0xc,
- NVME_SC_LBA_RANGE = 0x80,
- NVME_SC_CAP_EXCEEDED = 0x81,
- NVME_SC_NS_NOT_READY = 0x82,
- NVME_SC_CQ_INVALID = 0x100,
- NVME_SC_QID_INVALID = 0x101,
- NVME_SC_QUEUE_SIZE = 0x102,
- NVME_SC_ABORT_LIMIT = 0x103,
- NVME_SC_ABORT_MISSING = 0x104,
- NVME_SC_ASYNC_LIMIT = 0x105,
- NVME_SC_FIRMWARE_SLOT = 0x106,
- NVME_SC_FIRMWARE_IMAGE = 0x107,
- NVME_SC_INVALID_VECTOR = 0x108,
- NVME_SC_INVALID_LOG_PAGE = 0x109,
- NVME_SC_INVALID_FORMAT = 0x10a,
- NVME_SC_BAD_ATTRIBUTES = 0x180,
- NVME_SC_WRITE_FAULT = 0x280,
- NVME_SC_READ_ERROR = 0x281,
- NVME_SC_GUARD_CHECK = 0x282,
- NVME_SC_APPTAG_CHECK = 0x283,
- NVME_SC_REFTAG_CHECK = 0x284,
- NVME_SC_COMPARE_FAILED = 0x285,
- NVME_SC_ACCESS_DENIED = 0x286,
-};
-
-struct nvme_completion {
- __le32 result; /* Used by admin commands to return data */
- __u32 rsvd;
- __le16 sq_head; /* how much of this queue may be reclaimed */
- __le16 sq_id; /* submission queue that generated this entry */
- __u16 command_id; /* of the command which completed */
- __le16 status; /* did the command fail, and if so, why? */
-};
-
-struct nvme_user_io {
- __u8 opcode;
- __u8 flags;
- __u16 control;
- __u16 nblocks;
- __u16 rsvd;
- __u64 metadata;
- __u64 addr;
- __u64 slba;
- __u32 dsmgmt;
- __u32 reftag;
- __u16 apptag;
- __u16 appmask;
-};
-
-struct nvme_admin_cmd {
- __u8 opcode;
- __u8 flags;
- __u16 rsvd1;
- __u32 nsid;
- __u32 cdw2;
- __u32 cdw3;
- __u64 metadata;
- __u64 addr;
- __u32 metadata_len;
- __u32 data_len;
- __u32 cdw10;
- __u32 cdw11;
- __u32 cdw12;
- __u32 cdw13;
- __u32 cdw14;
- __u32 cdw15;
- __u32 timeout_ms;
- __u32 result;
-};
-
-#define NVME_IOCTL_ID _IO('N', 0x40)
-#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd)
-#define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io)
-
-#ifdef __KERNEL__
-#include <linux/pci.h>
-#include <linux/miscdevice.h>
-#include <linux/kref.h>
-
#define NVME_IO_TIMEOUT (5 * HZ)
/*
@@ -553,7 +106,7 @@ struct nvme_ns {
struct request_queue *queue;
struct gendisk *disk;
- int ns_id;
+ unsigned ns_id;
int lba_shift;
int ms;
u64 mode_select_num_blocks;
@@ -572,6 +125,7 @@ struct nvme_iod {
int offset; /* Of PRP list */
int nents; /* Used in scatterlist */
int length; /* Of data, in bytes */
+ unsigned long start_time;
dma_addr_t first_dma;
struct scatterlist sg[0];
};
@@ -613,6 +167,4 @@ struct sg_io_hdr;
int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_get_version_num(int __user *ip);
-#endif
-
#endif /* _LINUX_NVME_H */
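
One small sketch of why the new mask constants exist (the helper is illustrative): the shutdown-status field in CSTS is two bits wide, so a driver masks the register before comparing it against the SHST values.

#include <linux/nvme.h>

static bool example_nvme_shutdown_complete(u32 csts)
{
	return (csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT;
}
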
diff --git a/include/linux/of.h b/include/linux/of.h
index 3a45c4f593ad..276c546980d8 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -136,7 +136,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
return of_read_number(cell, size);
}
+#if defined(CONFIG_SPARC)
#include <asm/prom.h>
+#endif
/* Default #address and #size cells. Allow arch asm/prom.h to override */
#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT)
@@ -226,6 +228,19 @@ static inline int of_get_child_count(const struct device_node *np)
return num;
}
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_available_child_of_node(np, child)
+ num++;
+
+ return num;
+}
+
+/* cache lookup */
+extern struct device_node *of_find_next_cache_node(const struct device_node *);
extern struct device_node *of_find_node_with_property(
struct device_node *from, const char *prop_name);
#define for_each_node_with_property(dn, prop_name) \
@@ -275,12 +290,16 @@ extern int of_n_size_cells(struct device_node *np);
extern const struct of_device_id *of_match_node(
const struct of_device_id *matches, const struct device_node *node);
extern int of_modalias_node(struct device_node *node, char *modalias, int len);
+extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args);
extern struct device_node *of_parse_phandle(const struct device_node *np,
const char *phandle_name,
int index);
extern int of_parse_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name, int index,
struct of_phandle_args *out_args);
+extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name, int cells_count, int index,
+ struct of_phandle_args *out_args);
extern int of_count_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name);
@@ -324,12 +343,6 @@ extern int of_detach_node(struct device_node *);
*/
const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
u32 *pu);
-#define of_property_for_each_u32(np, propname, prop, p, u) \
- for (prop = of_find_property(np, propname, NULL), \
- p = of_prop_next_u32(prop, NULL, &u); \
- p; \
- p = of_prop_next_u32(prop, p, &u))
-
/*
* struct property *prop;
* const char *s;
@@ -338,11 +351,6 @@ const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
* printk("String value: %s\n", s);
*/
const char *of_prop_next_string(struct property *prop, const char *cur);
-#define of_property_for_each_string(np, propname, prop, s) \
- for (prop = of_find_property(np, propname, NULL), \
- s = of_prop_next_string(prop, NULL); \
- s; \
- s = of_prop_next_string(prop, s))
int of_device_is_stdout_path(struct device_node *dn);
@@ -372,6 +380,9 @@ static inline bool of_have_populated_dt(void)
#define for_each_child_of_node(parent, child) \
while (0)
+#define for_each_available_child_of_node(parent, child) \
+ while (0)
+
static inline struct device_node *of_get_child_by_name(
const struct device_node *node,
const char *name)
@@ -384,6 +395,11 @@ static inline int of_get_child_count(const struct device_node *np)
return 0;
}
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+ return 0;
+}
+
static inline int of_device_is_compatible(const struct device_node *device,
const char *name)
{
@@ -497,6 +513,13 @@ static inline int of_parse_phandle_with_args(struct device_node *np,
return -ENOSYS;
}
+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name, int cells_count, int index,
+ struct of_phandle_args *out_args)
+{
+ return -ENOSYS;
+}
+
static inline int of_count_phandle_with_args(struct device_node *np,
const char *list_name,
const char *cells_name)
@@ -519,21 +542,26 @@ static inline int of_device_is_stdout_path(struct device_node *dn)
return 0;
}
-#define of_match_ptr(_ptr) NULL
-#define of_match_node(_matches, _node) NULL
-#define of_property_for_each_u32(np, propname, prop, p, u) \
- while (0)
-#define of_property_for_each_string(np, propname, prop, s) \
- while (0)
-#endif /* CONFIG_OF */
+static inline const __be32 *of_prop_next_u32(struct property *prop,
+ const __be32 *cur, u32 *pu)
+{
+ return NULL;
+}
-#ifndef of_node_to_nid
-static inline int of_node_to_nid(struct device_node *np)
+static inline const char *of_prop_next_string(struct property *prop,
+ const char *cur)
{
- return numa_node_id();
+ return NULL;
}
-#define of_node_to_nid of_node_to_nid
+#define of_match_ptr(_ptr) NULL
+#define of_match_node(_matches, _node) NULL
+#endif /* CONFIG_OF */
+
+#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
+extern int of_node_to_nid(struct device_node *np);
+#else
+static inline int of_node_to_nid(struct device_node *device) { return 0; }
#endif
/**
@@ -573,6 +601,18 @@ static inline int of_property_read_u32(const struct device_node *np,
return of_property_read_u32_array(np, propname, out_value, 1);
}
+#define of_property_for_each_u32(np, propname, prop, p, u) \
+ for (prop = of_find_property(np, propname, NULL), \
+ p = of_prop_next_u32(prop, NULL, &u); \
+ p; \
+ p = of_prop_next_u32(prop, p, &u))
+
+#define of_property_for_each_string(np, propname, prop, s) \
+ for (prop = of_find_property(np, propname, NULL), \
+ s = of_prop_next_string(prop, NULL); \
+ s; \
+ s = of_prop_next_string(prop, s))
+
#if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
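
To show the effect of moving the iterator macros outside the CONFIG_OF guard, a small sketch (the property name is made up): the same driver code now compiles whether or not OF support is built in, and simply iterates zero times when of_prop_next_u32() returns NULL.

#include <linux/of.h>

static void example_list_rates(struct device_node *np)
{
	struct property *prop;
	const __be32 *p;
	u32 rate;

	of_property_for_each_u32(np, "example-rates", prop, p, rate)
		pr_info("supported rate: %u\n", rate);
}
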
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 4c2e6f26432c..5f6ed6b182b8 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -34,6 +34,10 @@ static inline void of_pci_range_to_resource(struct of_pci_range *range,
res->name = np->full_name;
}
+/* Translate a DMA address from device space to CPU space */
+extern u64 of_translate_dma_address(struct device_node *dev,
+ const __be32 *in_addr);
+
#ifdef CONFIG_OF_ADDRESS
extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
extern bool of_can_translate_address(struct device_node *dev);
@@ -52,10 +56,7 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
extern const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags);
-#ifndef pci_address_to_pio
-static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
-#define pci_address_to_pio pci_address_to_pio
-#endif
+extern unsigned long pci_address_to_pio(phys_addr_t addr);
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
@@ -63,13 +64,6 @@ extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range_parser *parser,
struct of_pci_range *range);
#else /* CONFIG_OF_ADDRESS */
-#ifndef of_address_to_resource
-static inline int of_address_to_resource(struct device_node *dev, int index,
- struct resource *r)
-{
- return -EINVAL;
-}
-#endif
static inline struct device_node *of_find_matching_node_by_address(
struct device_node *from,
const struct of_device_id *matches,
@@ -77,12 +71,7 @@ static inline struct device_node *of_find_matching_node_by_address(
{
return NULL;
}
-#ifndef of_iomap
-static inline void __iomem *of_iomap(struct device_node *device, int index)
-{
- return NULL;
-}
-#endif
+
static inline const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags)
{
@@ -103,6 +92,22 @@ static inline struct of_pci_range *of_pci_range_parser_one(
}
#endif /* CONFIG_OF_ADDRESS */
+#ifdef CONFIG_OF
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+void __iomem *of_iomap(struct device_node *node, int index);
+#else
+static inline int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r)
+{
+ return -EINVAL;
+}
+
+static inline void __iomem *of_iomap(struct device_node *device, int index)
+{
+ return NULL;
+}
+#endif
#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
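
A usage sketch for the relocated helpers (the function is illustrative): with of_address_to_resource() and of_iomap() now declared for every CONFIG_OF kernel and stubbed otherwise, a driver can map its first register bank unconditionally.

#include <linux/of_address.h>

static void __iomem *example_map_first_reg_bank(struct device_node *np)
{
	struct resource res;

	if (of_address_to_resource(np, 0, &res))
		return NULL;
	pr_debug("mapping %pR\n", &res);
	return of_iomap(np, 0);
}
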
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index ed136ad698ce..0beaee9dac1f 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -90,35 +90,36 @@ extern void *of_get_flat_dt_prop(unsigned long node, const char *name,
extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
extern int of_flat_dt_match(unsigned long node, const char *const *matches);
extern unsigned long of_get_flat_dt_root(void);
+extern int of_scan_flat_dt_by_path(const char *path,
+ int (*it)(unsigned long node, const char *name, int depth, void *data),
+ void *data);
extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
int depth, void *data);
-extern void early_init_dt_check_for_initrd(unsigned long node);
extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
extern u64 dt_mem_next_cell(int s, __be32 **cellp);
-/*
- * If BLK_DEV_INITRD, the fdt early init code will call this function,
- * to be provided by the arch code. start and end are specified as
- * physical addresses.
- */
-#ifdef CONFIG_BLK_DEV_INITRD
-extern void early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end);
-#endif
-
/* Early flat tree scan hooks */
extern int early_init_dt_scan_root(unsigned long node, const char *uname,
int depth, void *data);
+extern bool early_init_dt_scan(void *params);
+
+extern const char *of_flat_dt_get_machine_name(void);
+extern const void *of_flat_dt_match_machine(const void *default_match,
+ const void * (*get_next_compat)(const char * const**));
+
/* Other Prototypes */
extern void unflatten_device_tree(void);
+extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
#else /* CONFIG_OF_FLATTREE */
+static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
static inline void unflatten_device_tree(void) {}
+static inline void unflatten_and_copy_device_tree(void) {}
#endif /* CONFIG_OF_FLATTREE */
#endif /* __ASSEMBLY__ */
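of_scan_flat_dt_by_path() takes the same iterator signature as the other early scan hooks declared above; a sketch of a caller, with the callback name and the compatible string invented for illustration:

	static int __init find_early_uart(unsigned long node, const char *name,
					  int depth, void *data)
	{
		/* returning non-zero stops the scan */
		return of_flat_dt_is_compatible(node, "ns16550a");
	}

	/* in early arch setup, before the tree is unflattened */
	of_scan_flat_dt_by_path("/soc", find_early_uart, NULL);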
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index a83dc6f5008e..f14123a5a9df 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -19,6 +19,7 @@
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/of.h>
+#include <linux/gpio/consumer.h>
struct device_node;
@@ -47,7 +48,7 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
return container_of(gc, struct of_mm_gpio_chip, gc);
}
-extern int of_get_named_gpio_flags(struct device_node *np,
+extern struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
const char *list_name, int index, enum of_gpio_flags *flags);
extern int of_mm_gpiochip_add(struct device_node *np,
@@ -62,10 +63,10 @@ extern int of_gpio_simple_xlate(struct gpio_chip *gc,
#else /* CONFIG_OF_GPIO */
/* Drivers may not strictly depend on the GPIO support, so let them link. */
-static inline int of_get_named_gpio_flags(struct device_node *np,
+static inline struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
const char *list_name, int index, enum of_gpio_flags *flags)
{
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
}
static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
@@ -80,6 +81,18 @@ static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
#endif /* CONFIG_OF_GPIO */
+static inline int of_get_named_gpio_flags(struct device_node *np,
+ const char *list_name, int index, enum of_gpio_flags *flags)
+{
+ struct gpio_desc *desc;
+ desc = of_get_named_gpiod_flags(np, list_name, index, flags);
+
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+ else
+ return desc_to_gpio(desc);
+}
+
/**
* of_gpio_named_count() - Count GPIOs for a device
* @np: device node to count GPIOs for
@@ -117,15 +130,21 @@ static inline int of_gpio_count(struct device_node *np)
}
/**
- * of_get_gpio_flags() - Get a GPIO number and flags to use with GPIO API
+ * of_get_gpiod_flags() - Get a GPIO descriptor and flags to use with GPIO API
* @np: device node to get GPIO from
* @index: index of the GPIO
* @flags: a flags pointer to fill in
*
- * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
+ * Returns GPIO descriptor to use with Linux generic GPIO API, or an errno
* value on the error condition. If @flags is not NULL the function also fills
* in flags for the GPIO.
*/
+static inline struct gpio_desc *of_get_gpiod_flags(struct device_node *np,
+ int index, enum of_gpio_flags *flags)
+{
+ return of_get_named_gpiod_flags(np, "gpios", index, flags);
+}
+
static inline int of_get_gpio_flags(struct device_node *np, int index,
enum of_gpio_flags *flags)
{
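The new of_get_named_gpio_flags() wrapper keeps the integer GPIO API working on top of the descriptor-based of_get_named_gpiod_flags(). Driver-side use is unchanged; roughly (the property name and error handling are illustrative):

	enum of_gpio_flags flags;
	int gpio;

	gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, &flags);
	if (gpio < 0)
		return gpio;		/* e.g. -ENOSYS or -EPROBE_DEFER */

	/* flags now carries cell flags such as OF_GPIO_ACTIVE_LOW */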
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 535cecf1e02f..3f23b4472c31 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -1,8 +1,6 @@
#ifndef __OF_IRQ_H
#define __OF_IRQ_H
-#if defined(CONFIG_OF)
-struct of_irq;
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/irq.h>
@@ -10,30 +8,6 @@ struct of_irq;
#include <linux/ioport.h>
#include <linux/of.h>
-/*
- * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
- * implements it differently. However, the prototype is the same for all,
- * so declare it here regardless of the CONFIG_OF_IRQ setting.
- */
-extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-
-#if defined(CONFIG_OF_IRQ)
-/**
- * of_irq - container for device_node/irq_specifier pair for an irq controller
- * @controller: pointer to interrupt controller device tree node
- * @size: size of interrupt specifier
- * @specifier: array of cells @size long specifing the specific interrupt
- *
- * This structure is returned when an interrupt is mapped. The controller
- * field needs to be put() after use
- */
-#define OF_MAX_IRQ_SPEC 4 /* We handle specifiers of at most 4 cells */
-struct of_irq {
- struct device_node *controller; /* Interrupt controller node */
- u32 size; /* Specifier size */
- u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */
-};
-
typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
/*
@@ -45,37 +19,46 @@ typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
extern unsigned int of_irq_workarounds;
extern struct device_node *of_irq_dflt_pic;
-extern int of_irq_map_oldworld(struct device_node *device, int index,
- struct of_irq *out_irq);
+extern int of_irq_parse_oldworld(struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
#define of_irq_workarounds (0)
#define of_irq_dflt_pic (NULL)
-static inline int of_irq_map_oldworld(struct device_node *device, int index,
- struct of_irq *out_irq)
+static inline int of_irq_parse_oldworld(struct device_node *device, int index,
+ struct of_phandle_args *out_irq)
{
return -EINVAL;
}
#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
-
-extern int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
- u32 ointsize, const __be32 *addr,
- struct of_irq *out_irq);
-extern int of_irq_map_one(struct device_node *device, int index,
- struct of_irq *out_irq);
-extern unsigned int irq_create_of_mapping(struct device_node *controller,
- const u32 *intspec,
- unsigned int intsize);
+extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
+extern int of_irq_parse_one(struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
+extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern int of_irq_count(struct device_node *dev);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
-extern struct device_node *of_irq_find_parent(struct device_node *child);
extern void of_irq_init(const struct of_device_id *matches);
-#endif /* CONFIG_OF_IRQ */
+#ifdef CONFIG_OF_IRQ
+extern int of_irq_count(struct device_node *dev);
+#else
+static inline int of_irq_count(struct device_node *dev)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_OF)
+/*
+ * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
+ * implements it differently. However, the prototype is the same for all,
+ * so declare it here regardless of the CONFIG_OF_IRQ setting.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
#else /* !CONFIG_OF */
static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
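With struct of_irq gone, resolving an interrupt is a parse-then-map pair on a struct of_phandle_args; roughly (np is an assumed device node, and irq_of_parse_and_map() above is essentially this pair in one call):

	struct of_phandle_args oirq;
	unsigned int virq = 0;

	if (!of_irq_parse_one(np, 0, &oirq))	/* first "interrupts" entry */
		virq = irq_create_of_mapping(&oirq);
	if (!virq)
		return -EINVAL;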
diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h
index ed7f267e6389..6f10e938ff7e 100644
--- a/include/linux/of_mtd.h
+++ b/include/linux/of_mtd.h
@@ -10,10 +10,29 @@
#define __LINUX_OF_NET_H
#ifdef CONFIG_OF_MTD
+
#include <linux/of.h>
int of_get_nand_ecc_mode(struct device_node *np);
int of_get_nand_bus_width(struct device_node *np);
bool of_get_nand_on_flash_bbt(struct device_node *np);
-#endif
+
+#else /* CONFIG_OF_MTD */
+
+static inline int of_get_nand_ecc_mode(struct device_node *np)
+{
+ return -ENOSYS;
+}
+
+static inline int of_get_nand_bus_width(struct device_node *np)
+{
+ return -ENOSYS;
+}
+
+static inline bool of_get_nand_on_flash_bbt(struct device_node *np)
+{
+ return false;
+}
+
+#endif /* CONFIG_OF_MTD */
#endif /* __LINUX_OF_MTD_H */
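The new !CONFIG_OF_MTD stubs let NAND drivers call these helpers unconditionally; a probe-time sketch (the chip pointer is an assumption about the caller):

	int ecc_mode  = of_get_nand_ecc_mode(np);	/* negative if unspecified */
	int bus_width = of_get_nand_bus_width(np);	/* 8 or 16, negative on error */

	if (of_get_nand_on_flash_bbt(np))
		chip->bbt_options |= NAND_BBT_USE_FLASH;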
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 61bf53b02779..34597c8c1a4c 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -9,10 +9,10 @@
#ifdef CONFIG_OF_NET
#include <linux/of.h>
-extern const int of_get_phy_mode(struct device_node *np);
+extern int of_get_phy_mode(struct device_node *np);
extern const void *of_get_mac_address(struct device_node *np);
#else
-static inline const int of_get_phy_mode(struct device_node *np)
+static inline int of_get_phy_mode(struct device_node *np)
{
return -ENODEV;
}
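Dropping the meaningless const on the by-value return changes nothing for callers, which keep treating negative values as errors; roughly:

	int phy_mode = of_get_phy_mode(np);

	if (phy_mode < 0)
		phy_mode = PHY_INTERFACE_MODE_RGMII;	/* illustrative board default */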
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 7a04826018c0..1a1f5ffd5288 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -2,10 +2,12 @@
#define __OF_PCI_H
#include <linux/pci.h>
+#include <linux/msi.h>
struct pci_dev;
-struct of_irq;
-int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq);
+struct of_phandle_args;
+int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
+int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
struct device_node;
struct device_node *of_pci_find_child_device(struct device_node *parent,
@@ -13,4 +15,15 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
int of_pci_get_devfn(struct device_node *np);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
+#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
+int of_pci_msi_chip_add(struct msi_chip *chip);
+void of_pci_msi_chip_remove(struct msi_chip *chip);
+struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node);
+#else
+static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; }
+static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { }
+static inline struct msi_chip *
+of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
+#endif
+
#endif
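These helpers let a PCI host-bridge driver publish its MSI controller by device-tree node; an illustrative sketch (my_msi_chip and msi_node are hypothetical driver state, not part of this header):

	static struct msi_chip my_msi_chip;

	/* host controller probe */
	my_msi_chip.of_node = msi_node;
	if (of_pci_msi_chip_add(&my_msi_chip))
		return -EINVAL;

	/* later, when setting up the bus */
	bus->msi = of_pci_find_msi_chip_by_node(msi_node);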
diff --git a/include/linux/oom.h b/include/linux/oom.h
index da60007075b5..4cd62677feb9 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -82,6 +82,11 @@ static inline void oom_killer_enable(void)
oom_killer_disabled = false;
}
+static inline bool oom_gfp_allowed(gfp_t gfp_mask)
+{
+ return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
+}
+
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
/* sysctls */
diff --git a/include/linux/opp.h b/include/linux/opp.h
deleted file mode 100644
index 3aca2b8def33..000000000000
--- a/include/linux/opp.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Generic OPP Interface
- *
- * Copyright (C) 2009-2010 Texas Instruments Incorporated.
- * Nishanth Menon
- * Romit Dasgupta
- * Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_OPP_H__
-#define __LINUX_OPP_H__
-
-#include <linux/err.h>
-#include <linux/cpufreq.h>
-#include <linux/notifier.h>
-
-struct opp;
-struct device;
-
-enum opp_event {
- OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
-};
-
-#if defined(CONFIG_PM_OPP)
-
-unsigned long opp_get_voltage(struct opp *opp);
-
-unsigned long opp_get_freq(struct opp *opp);
-
-int opp_get_opp_count(struct device *dev);
-
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
- bool available);
-
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq);
-
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq);
-
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt);
-
-int opp_enable(struct device *dev, unsigned long freq);
-
-int opp_disable(struct device *dev, unsigned long freq);
-
-struct srcu_notifier_head *opp_get_notifier(struct device *dev);
-#else
-static inline unsigned long opp_get_voltage(struct opp *opp)
-{
- return 0;
-}
-
-static inline unsigned long opp_get_freq(struct opp *opp)
-{
- return 0;
-}
-
-static inline int opp_get_opp_count(struct device *dev)
-{
- return 0;
-}
-
-static inline struct opp *opp_find_freq_exact(struct device *dev,
- unsigned long freq, bool available)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline struct opp *opp_find_freq_floor(struct device *dev,
- unsigned long *freq)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline struct opp *opp_find_freq_ceil(struct device *dev,
- unsigned long *freq)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline int opp_add(struct device *dev, unsigned long freq,
- unsigned long u_volt)
-{
- return -EINVAL;
-}
-
-static inline int opp_enable(struct device *dev, unsigned long freq)
-{
- return 0;
-}
-
-static inline int opp_disable(struct device *dev, unsigned long freq)
-{
- return 0;
-}
-
-static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif /* CONFIG_PM_OPP */
-
-#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
-int of_init_opp_table(struct device *dev);
-#else
-static inline int of_init_opp_table(struct device *dev)
-{
- return -EINVAL;
-}
-#endif
-
-#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
-int opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table);
-void opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table);
-#else
-static inline int opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
-{
- return -EINVAL;
-}
-
-static inline
-void opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table)
-{
-}
-#endif /* CONFIG_CPU_FREQ */
-
-#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 86292beebfe2..438694650471 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -129,10 +129,9 @@ struct parallel_data {
struct padata_serial_queue __percpu *squeue;
atomic_t reorder_objects;
atomic_t refcnt;
+ atomic_t seq_nr;
struct padata_cpumask cpumask;
spinlock_t lock ____cacheline_aligned;
- spinlock_t seq_lock;
- unsigned int seq_nr;
unsigned int processed;
struct timer_list timer;
};
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 93506a114034..da523661500a 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -38,10 +38,10 @@
* The last is when there is insufficient space in page->flags and a separate
* lookup is necessary.
*
- * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
- * " plus space for last_nid: | NODE | ZONE | LAST_NID ... | FLAGS |
- * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
- * " plus space for last_nid: | SECTION | NODE | ZONE | LAST_NID ... | FLAGS |
+ * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
+ * " plus space for last_cpupid: | NODE | ZONE | LAST_CPUPID ... | FLAGS |
+ * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
+ * " plus space for last_cpupid: | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS |
* classic sparse no space for node: | SECTION | ZONE | ... | FLAGS |
*/
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -62,15 +62,21 @@
#endif
#ifdef CONFIG_NUMA_BALANCING
-#define LAST_NID_SHIFT NODES_SHIFT
+#define LAST__PID_SHIFT 8
+#define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1)
+
+#define LAST__CPU_SHIFT NR_CPUS_BITS
+#define LAST__CPU_MASK ((1 << LAST__CPU_SHIFT)-1)
+
+#define LAST_CPUPID_SHIFT (LAST__PID_SHIFT+LAST__CPU_SHIFT)
#else
-#define LAST_NID_SHIFT 0
+#define LAST_CPUPID_SHIFT 0
#endif
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_NID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
-#define LAST_NID_WIDTH LAST_NID_SHIFT
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
-#define LAST_NID_WIDTH 0
+#define LAST_CPUPID_WIDTH 0
#endif
/*
@@ -81,8 +87,8 @@
#define NODE_NOT_IN_PAGE_FLAGS
#endif
-#if defined(CONFIG_NUMA_BALANCING) && LAST_NID_WIDTH == 0
-#define LAST_NID_NOT_IN_PAGE_FLAGS
+#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
+#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6d53675c2b54..98ada58f9942 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -329,7 +329,9 @@ static inline void set_page_writeback(struct page *page)
* System with lots of page flags available. This allows separate
* flags for PageHead() and PageTail() checks of compound pages so that bit
* tests can be used in performance sensitive paths. PageCompound is
- * generally not used in hot code paths.
+ * generally not used in hot code paths except arch/powerpc/mm/init_64.c
+ * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
+ * and avoid handling those in real mode.
*/
__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
__PAGEFLAG(Tail, tail)
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index d006f0ca60f4..5a462c4e5009 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -27,7 +27,7 @@ static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
while (!pci_is_root_bus(pbus))
pbus = pbus->parent;
- return DEVICE_ACPI_HANDLE(pbus->bridge);
+ return ACPI_HANDLE(pbus->bridge);
}
static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
@@ -39,7 +39,7 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
else
dev = &pbus->self->dev;
- return DEVICE_ACPI_HANDLE(dev);
+ return ACPI_HANDLE(dev);
}
void acpi_pci_add_bus(struct pci_bus *bus);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 20888589c09e..a13d6825e586 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -32,7 +32,6 @@
#include <linux/irqreturn.h>
#include <uapi/linux/pci.h>
-/* Include the ID list */
#include <linux/pci_ids.h>
/*
@@ -42,9 +41,10 @@
*
* 7:3 = slot
* 2:0 = function
- * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined uapi/linux/pci.h
+ *
+ * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
* In the interest of not exposing interfaces to user-space unnecessarily,
- * the following kernel only defines are being added here.
+ * the following kernel-only defines are being added here.
*/
#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn)
/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
@@ -153,10 +153,10 @@ enum pcie_reset_state {
/* Reset is NOT asserted (Use to deassert reset) */
pcie_deassert_reset = (__force pcie_reset_state_t) 1,
- /* Use #PERST to reset PCI-E device */
+ /* Use #PERST to reset PCIe device */
pcie_warm_reset = (__force pcie_reset_state_t) 2,
- /* Use PCI-E Hot Reset to reset device */
+ /* Use PCIe Hot Reset to reset device */
pcie_hot_reset = (__force pcie_reset_state_t) 3
};
@@ -259,13 +259,13 @@ struct pci_dev {
unsigned int class; /* 3 bytes: (base,sub,prog-if) */
u8 revision; /* PCI revision, low byte of class word */
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
- u8 pcie_cap; /* PCI-E capability offset */
+ u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
- u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */
+ u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
u8 rom_base_reg; /* which config register controls the ROM */
- u8 pin; /* which interrupt pin this device uses */
- u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */
+ u8 pin; /* which interrupt pin this device uses */
+ u16 pcie_flags_reg; /* cached PCIe Capabilities Register */
struct pci_driver *driver; /* which driver has allocated this device */
u64 dma_mask; /* Mask of the bits of bus address this
@@ -300,7 +300,7 @@ struct pci_dev {
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
#ifdef CONFIG_PCIEASPM
- struct pcie_link_state *link_state; /* ASPM link state. */
+ struct pcie_link_state *link_state; /* ASPM link state */
#endif
pci_channel_state_t error_state; /* current connectivity state */
@@ -317,7 +317,7 @@ struct pci_dev {
bool match_driver; /* Skip attaching driver */
/* These fields are used by common fixups */
- unsigned int transparent:1; /* Transparent PCI bridge */
+ unsigned int transparent:1; /* Subtractive decode PCI bridge */
unsigned int multifunction:1;/* Part of multi-function device */
/* keep track of device state */
unsigned int is_added:1;
@@ -326,12 +326,10 @@ struct pci_dev {
unsigned int block_cfg_access:1; /* config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
- unsigned int msi_enabled:1;
+ unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
unsigned int is_managed:1;
- unsigned int is_pcie:1; /* Obsolete. Will be removed.
- Use pci_is_pcie() instead */
unsigned int needs_freset:1; /* Dev requires fundamental reset */
unsigned int state_saved:1;
unsigned int is_physfn:1;
@@ -373,7 +371,6 @@ static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
if (dev->is_virtfn)
dev = dev->physfn;
#endif
-
return dev;
}
@@ -446,6 +443,7 @@ struct pci_bus {
struct resource busn_res; /* bus numbers routed to this bus */
struct pci_ops *ops; /* configuration access functions */
+ struct msi_chip *msi; /* MSI controller */
void *sysdata; /* hook for sys-specific extension */
struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
@@ -457,7 +455,7 @@ struct pci_bus {
char name[48];
unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
- pci_bus_flags_t bus_flags; /* Inherited by child busses */
+ pci_bus_flags_t bus_flags; /* inherited by child buses */
struct device *bridge;
struct device dev;
struct bin_attribute *legacy_io; /* legacy I/O for this bus */
@@ -469,14 +467,27 @@ struct pci_bus {
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
/*
- * Returns true if the pci bus is root (behind host-pci bridge),
+ * Returns true if the PCI bus is root (behind host-PCI bridge),
* false otherwise
+ *
+ * Some code assumes that "bus->self == NULL" means that bus is a root bus.
+ * This is incorrect because "virtual" buses added for SR-IOV (via
+ * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
*/
static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
return !(pbus->parent);
}
+static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
+{
+ dev = pci_physfn(dev);
+ if (pci_is_root_bus(dev->bus))
+ return NULL;
+
+ return dev->bus->self;
+}
+
#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
@@ -498,7 +509,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false;
#define PCIBIOS_BUFFER_TOO_SMALL 0x89
/*
- * Translate above to generic errno for passing back through non-pci.
+ * Translate above to generic errno for passing back through non-PCI code.
*/
static inline int pcibios_err_to_errno(int err)
{
@@ -549,11 +560,12 @@ struct pci_dynids {
struct list_head list; /* for IDs added at runtime */
};
-/* ---------------------------------------------------------------- */
-/** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
- * a set of callbacks in struct pci_error_handlers, then that device driver
- * will be notified of PCI bus errors, and will be driven to recovery
- * when an error occurs.
+
+/*
+ * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
+ * a set of callbacks in struct pci_error_handlers, that device driver
+ * will be notified of PCI bus errors, and will be driven to recovery
+ * when an error occurs.
*/
typedef unsigned int __bitwise pci_ers_result_t;
@@ -597,7 +609,6 @@ struct pci_error_handlers {
void (*resume)(struct pci_dev *dev);
};
-/* ---------------------------------------------------------------- */
struct module;
struct pci_driver {
@@ -701,10 +712,10 @@ extern enum pcie_bus_config_types pcie_bus_config;
extern struct bus_type pci_bus_type;
-/* Do NOT directly access these two variables, unless you are arch specific pci
- * code, or pci core code. */
+/* Do NOT directly access these two variables, unless you are arch-specific PCI
+ * code, or PCI core code. */
extern struct list_head pci_root_buses; /* list of all known PCI buses */
-/* Some device drivers need know if pci is initiated */
+/* Some device drivers need to know if PCI is initialized */
int no_pci_devices(void);
void pcibios_resource_survey_bus(struct pci_bus *bus);
@@ -712,7 +723,7 @@ void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
int __must_check pcibios_enable_device(struct pci_dev *, int mask);
-/* Architecture specific versions may override this (weak) */
+/* Architecture-specific versions may override this (weak) */
char *pcibios_setup(char *str);
/* Used only when drivers/pci/setup.c is used */
@@ -949,6 +960,7 @@ void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
+bool pci_device_is_present(struct pci_dev *pdev);
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
@@ -1246,7 +1258,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
/*
* PCI domain support. Sometimes called PCI segment (eg by ACPI),
- * a PCI domain is defined to be a set of PCI busses which share
+ * a PCI domain is defined to be a set of PCI buses which share
* configuration space.
*/
#ifdef CONFIG_PCI_DOMAINS
@@ -1556,65 +1568,65 @@ enum pci_fixup_pass {
/* Anonymous variables would be nice... */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
class_shift, hook) \
- static const struct pci_fixup __pci_fixup_##name __used \
+ static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
__attribute__((__section__(#section), aligned((sizeof(void *))))) \
= { vendor, device, class, class_shift, hook };
#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
- resume##vendor##device##hook, vendor, device, class, \
+ resume##hook, vendor, device, class, \
class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
- resume_early##vendor##device##hook, vendor, device, \
+ resume_early##hook, vendor, device, \
class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
- suspend##vendor##device##hook, vendor, device, class, \
+ suspend##hook, vendor, device, class, \
class_shift, hook)
#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
- resume##vendor##device##hook, vendor, device, \
+ resume##hook, vendor, device, \
PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
- resume_early##vendor##device##hook, vendor, device, \
+ resume_early##hook, vendor, device, \
PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
- suspend##vendor##device##hook, vendor, device, \
+ suspend##hook, vendor, device, \
PCI_ANY_ID, 0, hook)
#ifdef CONFIG_PCI_QUIRKS
@@ -1660,7 +1672,7 @@ extern u8 pci_cache_line_size;
extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mem_size;
-/* Architecture specific versions may override these (weak) */
+/* Architecture-specific versions may override these (weak) */
int pcibios_add_platform_entries(struct pci_dev *dev);
void pcibios_disable_device(struct pci_dev *dev);
void pcibios_set_master(struct pci_dev *dev);
@@ -1748,11 +1760,11 @@ static inline int pci_pcie_cap(struct pci_dev *dev)
* pci_is_pcie - check if the PCI device is PCI Express capable
* @dev: PCI device
*
- * Retrun true if the PCI device is PCI Express capable, false otherwise.
+ * Returns: true if the PCI device is PCI Express capable, false otherwise.
*/
static inline bool pci_is_pcie(struct pci_dev *dev)
{
- return !!pci_pcie_cap(dev);
+ return pci_pcie_cap(dev);
}
/**
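pci_upstream_bridge(), added above, returns NULL at the root, so walking toward the host bridge is a simple loop; a sketch (the dev_info() message is illustrative):

	struct pci_dev *bridge;

	for (bridge = pci_upstream_bridge(pdev); bridge;
	     bridge = pci_upstream_bridge(bridge)) {
		if (pci_is_pcie(bridge))
			dev_info(&bridge->dev, "PCIe bridge above %s\n",
				 pci_name(pdev));
	}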
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 430dd963707b..a2e2f1d17e16 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -39,8 +39,8 @@
* @hardware_test: Called to run a specified hardware test on the specified
* slot.
* @get_power_status: Called to get the current power status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
+ * If this field is NULL, the value passed in the struct hotplug_slot_info
+ * will be used when this value is requested by a user.
* @get_attention_status: Called to get the current attention status of a slot.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
@@ -191,4 +191,3 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
void pci_configure_slot(struct pci_dev *dev);
#endif
-
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index bc95b2b391bf..97fbecdd7a40 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -758,6 +758,7 @@
#define PCI_DEVICE_ID_HP_CISSE 0x323a
#define PCI_DEVICE_ID_HP_CISSF 0x323b
#define PCI_DEVICE_ID_HP_CISSH 0x323c
+#define PCI_DEVICE_ID_HP_CISSI 0x3239
#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031
#define PCI_VENDOR_ID_PCTECH 0x1042
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index 9572669eea97..4f1089f2cc98 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -23,7 +23,7 @@
#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
struct pcie_device {
- int irq; /* Service IRQ/MSI/MSI-X Vector */
+ int irq; /* Service IRQ/MSI/MSI-X Vector */
struct pci_dev *port; /* Root/Upstream/Downstream Port */
u32 service; /* Port service this device represents */
void *priv_data; /* Service Private Data */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index cc88172c7d9a..9e4761caa80c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -332,7 +332,7 @@ do { \
#endif
#ifndef this_cpu_sub
-# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
+# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val))
#endif
#ifndef this_cpu_inc
@@ -375,22 +375,6 @@ do { \
# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif
-#ifndef this_cpu_xor
-# ifndef this_cpu_xor_1
-# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef this_cpu_xor_2
-# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef this_cpu_xor_4
-# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef this_cpu_xor_8
-# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
-#endif
-
#define _this_cpu_generic_add_return(pcp, val) \
({ \
typeof(pcp) ret__; \
@@ -418,7 +402,7 @@ do { \
# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif
-#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val))
+#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
@@ -586,7 +570,7 @@ do { \
#endif
#ifndef __this_cpu_sub
-# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
+# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val))
#endif
#ifndef __this_cpu_inc
@@ -629,22 +613,6 @@ do { \
# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif
-#ifndef __this_cpu_xor
-# ifndef __this_cpu_xor_1
-# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef __this_cpu_xor_2
-# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef __this_cpu_xor_4
-# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# ifndef __this_cpu_xor_8
-# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
-# endif
-# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
-#endif
-
#define __this_cpu_generic_add_return(pcp, val) \
({ \
__this_cpu_add(pcp, val); \
@@ -668,7 +636,7 @@ do { \
__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif
-#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val))
+#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
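The typeof() cast added to the sub/sub_return variants matters when the subtrahend is an unsigned type narrower than the per-cpu variable: negating it first wraps within the narrow unsigned type before the widening. A minimal illustration of the case being fixed (variable names made up):

	DEFINE_PER_CPU(long, cached_pages);
	unsigned int nr = 32;

	/*
	 * Previously this expanded to this_cpu_add(cached_pages, -nr), where
	 * -nr is computed as unsigned int (4294967264) and then widened, so
	 * the counter grew instead of shrinking. With -(typeof(pcp))(val)
	 * the addend is -(long)32, as intended.
	 */
	this_cpu_sub(cached_pages, nr);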
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
new file mode 100644
index 000000000000..1900bd0fa639
--- /dev/null
+++ b/include/linux/percpu_ida.h
@@ -0,0 +1,81 @@
+#ifndef __PERCPU_IDA_H__
+#define __PERCPU_IDA_H__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+#include <linux/cpumask.h>
+
+struct percpu_ida_cpu;
+
+struct percpu_ida {
+ /*
+ * number of tags available to be allocated, as passed to
+ * percpu_ida_init()
+ */
+ unsigned nr_tags;
+ unsigned percpu_max_size;
+ unsigned percpu_batch_size;
+
+ struct percpu_ida_cpu __percpu *tag_cpu;
+
+ /*
+ * Bitmap of cpus that (may) have tags on their percpu freelists:
+ * steal_tags() uses this to decide when to steal tags, and which cpus
+ * to try stealing from.
+ *
+ * It's ok for a freelist to be empty when its bit is set - steal_tags()
+ * will just keep looking - but the bitmap _must_ be set whenever a
+ * percpu freelist does have tags.
+ */
+ cpumask_t cpus_have_tags;
+
+ struct {
+ spinlock_t lock;
+ /*
+ * When we go to steal tags from another cpu (see steal_tags()),
+ * we want to pick a cpu at random. Cycling through them every
+ * time we steal is a bit easier and more or less equivalent:
+ */
+ unsigned cpu_last_stolen;
+
+ /* For sleeping on allocation failure */
+ wait_queue_head_t wait;
+
+ /*
+ * Global freelist - it's a stack where nr_free points to the
+ * top
+ */
+ unsigned nr_free;
+ unsigned *freelist;
+ } ____cacheline_aligned_in_smp;
+};
+
+/*
+ * Number of tags we move between the percpu freelist and the global freelist at
+ * a time
+ */
+#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
+/* Max size of percpu freelist */
+#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
+
+int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
+void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
+
+void percpu_ida_destroy(struct percpu_ida *pool);
+int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
+ unsigned long max_size, unsigned long batch_size);
+static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
+{
+ return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
+ IDA_DEFAULT_PCPU_BATCH_MOVE);
+}
+
+typedef int (*percpu_ida_cb)(unsigned, void *);
+int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
+ void *data);
+
+unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
+#endif /* __PERCPU_IDA_H__ */
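A sketch of the intended calling pattern (the pool size and the use of the tag are illustrative):

	struct percpu_ida tags;
	int tag;

	if (percpu_ida_init(&tags, 128))		/* 128 tags, default batching */
		return -ENOMEM;

	tag = percpu_ida_alloc(&tags, GFP_KERNEL);	/* negative on failure */
	if (tag >= 0) {
		/* tag is a unique index in [0, 128); use it, then return it */
		percpu_ida_free(&tags, tag);
	}

	percpu_ida_destroy(&tags);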
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 866e85c5eb94..2e069d1288df 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -294,9 +294,31 @@ struct ring_buffer;
*/
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
- struct list_head group_entry;
+ /*
+ * entry onto perf_event_context::event_list;
+ * modifications require ctx->lock
+ * RCU safe iterations.
+ */
struct list_head event_entry;
+
+ /*
+ * XXX: group_entry and sibling_list should be mutually exclusive;
+ * either you're a sibling on a group, or you're the group leader.
+ * Rework the code to always use the same list element.
+ *
+ * Locked for modification by both ctx->mutex and ctx->lock; holding
+ * either suffices for read.
+ */
+ struct list_head group_entry;
struct list_head sibling_list;
+
+ /*
+ * We need storage to track the entries in perf_pmu_migrate_context; we
+ * cannot use the event_entry because of RCU and we want to keep the
+ * group intact, which avoids using the other two entries.
+ */
+ struct list_head migrate_entry;
+
struct hlist_node hlist_entry;
int nr_siblings;
int group_flags;
@@ -562,6 +584,10 @@ struct perf_sample_data {
struct perf_regs_user regs_user;
u64 stack_user_size;
u64 weight;
+ /*
+ * Transaction flags for abort events:
+ */
+ u64 txn;
};
static inline void perf_sample_data_init(struct perf_sample_data *data,
@@ -577,6 +603,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
data->stack_user_size = 0;
data->weight = 0;
data->data_src.val = 0;
+ data->txn = 0;
}
extern void perf_output_sample(struct perf_output_handle *handle,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 64ab823f7b74..48a4dc3cb8cf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -559,6 +559,7 @@ static inline int phy_read_status(struct phy_device *phydev) {
return phydev->drv->read_status(phydev);
}
+int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_config_aneg(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
new file mode 100644
index 000000000000..6d722695e027
--- /dev/null
+++ b/include/linux/phy/phy.h
@@ -0,0 +1,270 @@
+/*
+ * phy.h -- generic phy header file
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DRIVERS_PHY_H
+#define __DRIVERS_PHY_H
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+
+struct phy;
+
+/**
+ * struct phy_ops - set of function pointers for performing phy operations
+ * @init: operation to be performed for initializing phy
+ * @exit: operation to be performed while exiting
+ * @power_on: powering on the phy
+ * @power_off: powering off the phy
+ * @owner: the module owner containing the ops
+ */
+struct phy_ops {
+ int (*init)(struct phy *phy);
+ int (*exit)(struct phy *phy);
+ int (*power_on)(struct phy *phy);
+ int (*power_off)(struct phy *phy);
+ struct module *owner;
+};
+
+/**
+ * struct phy - represents the phy device
+ * @dev: phy device
+ * @id: id of the phy device
+ * @ops: function pointers for performing phy operations
+ * @init_data: list of PHY consumers (non-dt only)
+ * @mutex: mutex to protect phy_ops
+ * @init_count: used to protect when the PHY is used by multiple consumers
+ * @power_count: used to protect when the PHY is used by multiple consumers
+ */
+struct phy {
+ struct device dev;
+ int id;
+ const struct phy_ops *ops;
+ struct phy_init_data *init_data;
+ struct mutex mutex;
+ int init_count;
+ int power_count;
+};
+
+/**
+ * struct phy_provider - represents the phy provider
+ * @dev: phy provider device
+ * @owner: the module owner having of_xlate
+ * @of_xlate: function pointer to obtain phy instance from phy pointer
+ * @list: to maintain a linked list of PHY providers
+ */
+struct phy_provider {
+ struct device *dev;
+ struct module *owner;
+ struct list_head list;
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args);
+};
+
+/**
+ * struct phy_consumer - represents the phy consumer
+ * @dev_name: the device name of the controller that will use this PHY device
+ * @port: name given to the consumer port
+ */
+struct phy_consumer {
+ const char *dev_name;
+ const char *port;
+};
+
+/**
+ * struct phy_init_data - contains the list of PHY consumers
+ * @num_consumers: number of consumers for this PHY device
+ * @consumers: list of PHY consumers
+ */
+struct phy_init_data {
+ unsigned int num_consumers;
+ struct phy_consumer *consumers;
+};
+
+#define PHY_CONSUMER(_dev_name, _port) \
+{ \
+ .dev_name = _dev_name, \
+ .port = _port, \
+}
+
+#define to_phy(dev) (container_of((dev), struct phy, dev))
+
+#define of_phy_provider_register(dev, xlate) \
+ __of_phy_provider_register((dev), THIS_MODULE, (xlate))
+
+#define devm_of_phy_provider_register(dev, xlate) \
+ __devm_of_phy_provider_register((dev), THIS_MODULE, (xlate))
+
+static inline void phy_set_drvdata(struct phy *phy, void *data)
+{
+ dev_set_drvdata(&phy->dev, data);
+}
+
+static inline void *phy_get_drvdata(struct phy *phy)
+{
+ return dev_get_drvdata(&phy->dev);
+}
+
+#if IS_ENABLED(CONFIG_GENERIC_PHY)
+int phy_pm_runtime_get(struct phy *phy);
+int phy_pm_runtime_get_sync(struct phy *phy);
+int phy_pm_runtime_put(struct phy *phy);
+int phy_pm_runtime_put_sync(struct phy *phy);
+void phy_pm_runtime_allow(struct phy *phy);
+void phy_pm_runtime_forbid(struct phy *phy);
+int phy_init(struct phy *phy);
+int phy_exit(struct phy *phy);
+int phy_power_on(struct phy *phy);
+int phy_power_off(struct phy *phy);
+struct phy *phy_get(struct device *dev, const char *string);
+struct phy *devm_phy_get(struct device *dev, const char *string);
+void phy_put(struct phy *phy);
+void devm_phy_put(struct device *dev, struct phy *phy);
+struct phy *of_phy_simple_xlate(struct device *dev,
+ struct of_phandle_args *args);
+struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
+ struct phy_init_data *init_data);
+struct phy *devm_phy_create(struct device *dev,
+ const struct phy_ops *ops, struct phy_init_data *init_data);
+void phy_destroy(struct phy *phy);
+void devm_phy_destroy(struct device *dev, struct phy *phy);
+struct phy_provider *__of_phy_provider_register(struct device *dev,
+ struct module *owner, struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
+struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
+ struct module *owner, struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
+void of_phy_provider_unregister(struct phy_provider *phy_provider);
+void devm_of_phy_provider_unregister(struct device *dev,
+ struct phy_provider *phy_provider);
+#else
+static inline int phy_pm_runtime_get(struct phy *phy)
+{
+ return -ENOSYS;
+}
+
+static inline int phy_pm_runtime_get_sync(struct phy *phy)
+{
+ return -ENOSYS;
+}
+
+static inline int phy_pm_runtime_put(struct phy *phy)
+{
+ return -ENOSYS;
+}
+
+static inline int phy_pm_runtime_put_sync(struct phy *phy)
+{
+ return -ENOSYS;
+}
+
+static inline void phy_pm_runtime_allow(struct phy *phy)
+{
+ return;
+}
+
+static inline void phy_pm_runtime_forbid(struct phy *phy)
+{
+ return;
+}
+
+static inline int phy_init(struct phy *phy)
+{
+ return -ENOSYS;
+}
+
+static inline int phy_exit(struct phy *phy)
+{
+ return -ENOSYS;
+}
+
+static inline int phy_power_on(struct phy *phy)
+{
+ return -ENOSYS;
+}
+
+static inline int phy_power_off(struct phy *phy)
+{
+ return -ENOSYS;
+}
+
+static inline struct phy *phy_get(struct device *dev, const char *string)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy *devm_phy_get(struct device *dev, const char *string)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void phy_put(struct phy *phy)
+{
+}
+
+static inline void devm_phy_put(struct device *dev, struct phy *phy)
+{
+}
+
+static inline struct phy *of_phy_simple_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy *phy_create(struct device *dev,
+ const struct phy_ops *ops, struct phy_init_data *init_data)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy *devm_phy_create(struct device *dev,
+ const struct phy_ops *ops, struct phy_init_data *init_data)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void phy_destroy(struct phy *phy)
+{
+}
+
+static inline void devm_phy_destroy(struct device *dev, struct phy *phy)
+{
+}
+
+static inline struct phy_provider *__of_phy_provider_register(
+ struct device *dev, struct module *owner, struct phy * (*of_xlate)(
+ struct device *dev, struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy_provider *__devm_of_phy_provider_register(struct device
+ *dev, struct module *owner, struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void of_phy_provider_unregister(struct phy_provider *phy_provider)
+{
+}
+
+static inline void devm_of_phy_provider_unregister(struct device *dev,
+ struct phy_provider *phy_provider)
+{
+}
+#endif
+
+#endif /* __DRIVERS_PHY_H */
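From the consumer side the framework is used roughly as below; the PHY name and the omitted error checks on init/power_on are illustrative simplifications:

	struct phy *phy;

	phy = devm_phy_get(dev, "usb2-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	phy_init(phy);
	phy_power_on(phy);
	/* ... controller operates ... */
	phy_power_off(phy);
	phy_exit(phy);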
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index e2772666f004..7246ef3d4455 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -23,6 +23,7 @@ struct bsd_acct_struct;
struct pid_namespace {
struct kref kref;
struct pidmap pidmap[PIDMAP_ENTRIES];
+ struct rcu_head rcu;
int last_pid;
unsigned int nr_hashed;
struct task_struct *child_reaper;
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 5979147d2bda..fefb88663975 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -144,6 +144,9 @@ extern struct pinctrl_dev *pinctrl_find_and_add_gpio_range(const char *devname,
extern struct pinctrl_gpio_range *
pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev,
unsigned int pin);
+extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ const char *pin_group, const unsigned **pins,
+ unsigned *num_pins);
#ifdef CONFIG_OF
extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
diff --git a/include/linux/i2c/at24.h b/include/linux/platform_data/at24.h
index 285025a9cdc9..c42aa89d34ee 100644
--- a/include/linux/i2c/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -28,7 +28,7 @@
*
* void get_mac_addr(struct memory_accessor *mem_acc, void *context)
* {
- * u8 *mac_addr = ethernet_pdata->mac_addr;
+ * u8 *mac_addr = ethernet_pdata->mac_addr;
* off_t offset = context;
*
* // Read MAC addr from EEPROM
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index 6a293b7fff3b..cea9f70133c5 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -71,6 +71,10 @@ struct atmel_nand_data {
u8 on_flash_bbt; /* bbt on flash */
struct mtd_partition *parts;
unsigned int num_parts;
+ bool has_dma; /* support dma transfer */
+
+ /* default is false; only true for the at32ap7000 chip */
+ bool need_reset_workaround;
};
/* Serial */
diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h
new file mode 100644
index 000000000000..671d6502d241
--- /dev/null
+++ b/include/linux/platform_data/bd6107.h
@@ -0,0 +1,19 @@
+/*
+ * bd6107.h - Rohm BD6107 LEDs Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __BD6107_H__
+#define __BD6107_H__
+
+struct device;
+
+struct bd6107_platform_data {
+ struct device *fbdev;
+ int reset; /* Reset GPIO */
+ unsigned int def_value;
+};
+
+#endif
diff --git a/include/linux/platform_data/clk-nomadik.h b/include/linux/platform_data/clk-nomadik.h
deleted file mode 100644
index 5713c87b2477..000000000000
--- a/include/linux/platform_data/clk-nomadik.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* Minimal platform data header */
-void nomadik_clk_init(void);
diff --git a/include/linux/platform_data/clk-ux500.h b/include/linux/platform_data/clk-ux500.h
index 9d98f3aaa16c..97baf831e071 100644
--- a/include/linux/platform_data/clk-ux500.h
+++ b/include/linux/platform_data/clk-ux500.h
@@ -10,6 +10,9 @@
#ifndef __CLK_UX500_H
#define __CLK_UX500_H
+void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
+ u32 clkrst5_base, u32 clkrst6_base);
+
void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
u32 clkrst5_base, u32 clkrst6_base);
void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index 8db5ae03b6e3..689a856b86f9 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -84,6 +84,8 @@ struct snd_platform_data {
u8 version;
u8 txnumevt;
u8 rxnumevt;
+ int tx_dma_channel;
+ int rx_dma_channel;
};
enum {
diff --git a/include/linux/platform_data/dma-rcar-hpbdma.h b/include/linux/platform_data/dma-rcar-hpbdma.h
new file mode 100644
index 000000000000..648b8ea61a22
--- /dev/null
+++ b/include/linux/platform_data/dma-rcar-hpbdma.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011-2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __DMA_RCAR_HPBDMA_H
+#define __DMA_RCAR_HPBDMA_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/* Transmit sizes and respective register values */
+enum {
+ XMIT_SZ_8BIT = 0,
+ XMIT_SZ_16BIT = 1,
+ XMIT_SZ_32BIT = 2,
+ XMIT_SZ_MAX
+};
+
+/* DMA control register (DCR) bits */
+#define HPB_DMAE_DCR_DTAMD (1u << 26)
+#define HPB_DMAE_DCR_DTAC (1u << 25)
+#define HPB_DMAE_DCR_DTAU (1u << 24)
+#define HPB_DMAE_DCR_DTAU1 (1u << 23)
+#define HPB_DMAE_DCR_SWMD (1u << 22)
+#define HPB_DMAE_DCR_BTMD (1u << 21)
+#define HPB_DMAE_DCR_PKMD (1u << 20)
+#define HPB_DMAE_DCR_CT (1u << 18)
+#define HPB_DMAE_DCR_ACMD (1u << 17)
+#define HPB_DMAE_DCR_DIP (1u << 16)
+#define HPB_DMAE_DCR_SMDL (1u << 13)
+#define HPB_DMAE_DCR_SPDAM (1u << 12)
+#define HPB_DMAE_DCR_SDRMD_MASK (3u << 10)
+#define HPB_DMAE_DCR_SDRMD_MOD (0u << 10)
+#define HPB_DMAE_DCR_SDRMD_AUTO (1u << 10)
+#define HPB_DMAE_DCR_SDRMD_TIMER (2u << 10)
+#define HPB_DMAE_DCR_SPDS_MASK (3u << 8)
+#define HPB_DMAE_DCR_SPDS_8BIT (0u << 8)
+#define HPB_DMAE_DCR_SPDS_16BIT (1u << 8)
+#define HPB_DMAE_DCR_SPDS_32BIT (2u << 8)
+#define HPB_DMAE_DCR_DMDL (1u << 5)
+#define HPB_DMAE_DCR_DPDAM (1u << 4)
+#define HPB_DMAE_DCR_DDRMD_MASK (3u << 2)
+#define HPB_DMAE_DCR_DDRMD_MOD (0u << 2)
+#define HPB_DMAE_DCR_DDRMD_AUTO (1u << 2)
+#define HPB_DMAE_DCR_DDRMD_TIMER (2u << 2)
+#define HPB_DMAE_DCR_DPDS_MASK (3u << 0)
+#define HPB_DMAE_DCR_DPDS_8BIT (0u << 0)
+#define HPB_DMAE_DCR_DPDS_16BIT (1u << 0)
+#define HPB_DMAE_DCR_DPDS_32BIT (2u << 0)
+
+/* Asynchronous reset register (ASYNCRSTR) bits */
+#define HPB_DMAE_ASYNCRSTR_ASRST41 BIT(10)
+#define HPB_DMAE_ASYNCRSTR_ASRST40 BIT(9)
+#define HPB_DMAE_ASYNCRSTR_ASRST39 BIT(8)
+#define HPB_DMAE_ASYNCRSTR_ASRST27 BIT(7)
+#define HPB_DMAE_ASYNCRSTR_ASRST26 BIT(6)
+#define HPB_DMAE_ASYNCRSTR_ASRST25 BIT(5)
+#define HPB_DMAE_ASYNCRSTR_ASRST24 BIT(4)
+#define HPB_DMAE_ASYNCRSTR_ASRST23 BIT(3)
+#define HPB_DMAE_ASYNCRSTR_ASRST22 BIT(2)
+#define HPB_DMAE_ASYNCRSTR_ASRST21 BIT(1)
+#define HPB_DMAE_ASYNCRSTR_ASRST20 BIT(0)
+
+struct hpb_dmae_slave_config {
+ unsigned int id;
+ dma_addr_t addr;
+ u32 dcr;
+ u32 port;
+ u32 rstr;
+ u32 mdr;
+ u32 mdm;
+ u32 flags;
+#define HPB_DMAE_SET_ASYNC_RESET BIT(0)
+#define HPB_DMAE_SET_ASYNC_MODE BIT(1)
+ u32 dma_ch;
+};
+
+#define HPB_DMAE_CHANNEL(_irq, _s_id) \
+{ \
+ .ch_irq = _irq, \
+ .s_id = _s_id, \
+}
+
+struct hpb_dmae_channel {
+ unsigned int ch_irq;
+ unsigned int s_id;
+};
+
+struct hpb_dmae_pdata {
+ const struct hpb_dmae_slave_config *slaves;
+ int num_slaves;
+ const struct hpb_dmae_channel *channels;
+ int num_channels;
+ const unsigned int ts_shift[XMIT_SZ_MAX];
+ int num_hw_channels;
+};
+
+#endif
diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h
new file mode 100644
index 000000000000..89ba1b0c90e4
--- /dev/null
+++ b/include/linux/platform_data/dma-s3c24xx.h
@@ -0,0 +1,46 @@
+/*
+ * S3C24XX DMA handling
+ *
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/* Helper to encode the source selection constraints for early s3c socs. */
+#define S3C24XX_DMA_CHANREQ(src, chan) ((BIT(3) | src) << chan * 4)
+
+enum s3c24xx_dma_bus {
+ S3C24XX_DMA_APB,
+ S3C24XX_DMA_AHB,
+};
+
+/**
+ * @bus: on which bus does the peripheral reside - AHB or APB.
+ * @handshake: whether a handshake with the peripheral is necessary
+ * @chansel: channel selection information, depending on variant; reqsel for
+ * s3c2443 and later and channel-selection map for earlier SoCs
+ * see CHANSEL doc in s3c2443-dma.c
+ */
+struct s3c24xx_dma_channel {
+ enum s3c24xx_dma_bus bus;
+ bool handshake;
+ u16 chansel;
+};
+
+/**
+ * struct s3c24xx_dma_platdata - platform specific settings
+ * @num_phy_channels: number of physical channels
+ * @channels: array of virtual channel descriptions
+ * @num_channels: number of virtual channels
+ */
+struct s3c24xx_dma_platdata {
+ int num_phy_channels;
+ struct s3c24xx_dma_channel *channels;
+ int num_channels;
+};
+
+struct dma_chan;
+bool s3c24xx_dma_filter(struct dma_chan *chan, void *param);
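Board code fills the platdata with one s3c24xx_dma_channel per peripheral; a hypothetical two-channel example built on the CHANREQ helper above (all values invented):

	static struct s3c24xx_dma_channel my_chans[] = {
		[0] = { .bus = S3C24XX_DMA_APB, .handshake = true,
			.chansel = S3C24XX_DMA_CHANREQ(2, 0) },
		[1] = { .bus = S3C24XX_DMA_AHB, .handshake = false,
			.chansel = 5 },	/* reqsel-style value on s3c2443+ */
	};

	static struct s3c24xx_dma_platdata my_dma_pdata = {
		.num_phy_channels = 4,
		.channels = my_chans,
		.num_channels = ARRAY_SIZE(my_chans),
	};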
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index 57300fd7cc03..f50821cb64be 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -67,10 +67,10 @@ struct edmacc_param {
#define ITCCHEN BIT(23)
 /* ch_status parameter of the callback function: possible values */
-#define DMA_COMPLETE 1
-#define DMA_CC_ERROR 2
-#define DMA_TC1_ERROR 3
-#define DMA_TC2_ERROR 4
+#define EDMA_DMA_COMPLETE 1
+#define EDMA_DMA_CC_ERROR 2
+#define EDMA_DMA_TC1_ERROR 3
+#define EDMA_DMA_TC2_ERROR 4
enum address_mode {
INCR = 0,
@@ -180,4 +180,6 @@ struct edma_soc_info {
const s16 (*xbar_chans)[2];
};
+int edma_trigger_channel(unsigned);
+
#endif
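The completion codes gain an EDMA_ prefix, so existing callbacks only need the new names. A sketch of such a callback is shown below; the three-argument prototype is an assumption based on the "ch_status parameter" comment above and the DaVinci private EDMA API, and is not spelled out in this header:

    #include <linux/printk.h>
    #include <linux/platform_data/edma.h>

    static void board_edma_callback(unsigned int channel, u16 ch_status,
                                    void *data)
    {
            if (ch_status == EDMA_DMA_COMPLETE)
                    pr_debug("edma: channel %u finished\n", channel);
            else
                    pr_warn("edma: channel %u reported error %u\n",
                            channel, ch_status);
    }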
diff --git a/include/linux/platform_data/exynos_thermal.h b/include/linux/platform_data/exynos_thermal.h
deleted file mode 100644
index da7e6274b175..000000000000
--- a/include/linux/platform_data/exynos_thermal.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * exynos_thermal.h - Samsung EXYNOS TMU (Thermal Management Unit)
- *
- * Copyright (C) 2011 Samsung Electronics
- * Donggeun Kim <dg77.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _LINUX_EXYNOS_THERMAL_H
-#define _LINUX_EXYNOS_THERMAL_H
-#include <linux/cpu_cooling.h>
-
-enum calibration_type {
- TYPE_ONE_POINT_TRIMMING,
- TYPE_TWO_POINT_TRIMMING,
- TYPE_NONE,
-};
-
-enum soc_type {
- SOC_ARCH_EXYNOS4210 = 1,
- SOC_ARCH_EXYNOS,
-};
-/**
- * struct freq_clip_table
- * @freq_clip_max: maximum frequency allowed for this cooling state.
- * @temp_level: Temperature level at which the temperature clipping will
- * happen.
- * @mask_val: cpumask of the allowed cpu's where the clipping will take place.
- *
- * This structure is required to be filled and passed to the
- * cpufreq_cooling_unregister function.
- */
-struct freq_clip_table {
- unsigned int freq_clip_max;
- unsigned int temp_level;
- const struct cpumask *mask_val;
-};
-
-/**
- * struct exynos_tmu_platform_data
- * @threshold: basic temperature for generating interrupt
- * 25 <= threshold <= 125 [unit: degree Celsius]
- * @threshold_falling: differntial value for setting threshold
- * of temperature falling interrupt.
- * @trigger_levels: array for each interrupt levels
- * [unit: degree Celsius]
- * 0: temperature for trigger_level0 interrupt
- * condition for trigger_level0 interrupt:
- * current temperature > threshold + trigger_levels[0]
- * 1: temperature for trigger_level1 interrupt
- * condition for trigger_level1 interrupt:
- * current temperature > threshold + trigger_levels[1]
- * 2: temperature for trigger_level2 interrupt
- * condition for trigger_level2 interrupt:
- * current temperature > threshold + trigger_levels[2]
- * 3: temperature for trigger_level3 interrupt
- * condition for trigger_level3 interrupt:
- * current temperature > threshold + trigger_levels[3]
- * @trigger_level0_en:
- * 1 = enable trigger_level0 interrupt,
- * 0 = disable trigger_level0 interrupt
- * @trigger_level1_en:
- * 1 = enable trigger_level1 interrupt,
- * 0 = disable trigger_level1 interrupt
- * @trigger_level2_en:
- * 1 = enable trigger_level2 interrupt,
- * 0 = disable trigger_level2 interrupt
- * @trigger_level3_en:
- * 1 = enable trigger_level3 interrupt,
- * 0 = disable trigger_level3 interrupt
- * @gain: gain of amplifier in the positive-TC generator block
- * 0 <= gain <= 15
- * @reference_voltage: reference voltage of amplifier
- * in the positive-TC generator block
- * 0 <= reference_voltage <= 31
- * @noise_cancel_mode: noise cancellation mode
- * 000, 100, 101, 110 and 111 can be different modes
- * @type: determines the type of SOC
- * @efuse_value: platform defined fuse value
- * @cal_type: calibration type for temperature
- * @freq_clip_table: Table representing frequency reduction percentage.
- * @freq_tab_count: Count of the above table as frequency reduction may
- * applicable to only some of the trigger levels.
- *
- * This structure is required for configuration of exynos_tmu driver.
- */
-struct exynos_tmu_platform_data {
- u8 threshold;
- u8 threshold_falling;
- u8 trigger_levels[4];
- bool trigger_level0_en;
- bool trigger_level1_en;
- bool trigger_level2_en;
- bool trigger_level3_en;
-
- u8 gain;
- u8 reference_voltage;
- u8 noise_cancel_mode;
- u32 efuse_value;
-
- enum calibration_type cal_type;
- enum soc_type type;
- struct freq_clip_table freq_tab[4];
- unsigned int freq_tab_count;
-};
-#endif /* _LINUX_EXYNOS_THERMAL_H */
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
new file mode 100644
index 000000000000..6efd20264585
--- /dev/null
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -0,0 +1,60 @@
+/*
+ * DaVinci GPIO Platform Related Defines
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DAVINCI_GPIO_PLATFORM_H
+#define __DAVINCI_GPIO_PLATFORM_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include <asm-generic/gpio.h>
+
+enum davinci_gpio_type {
+ GPIO_TYPE_TNETV107X = 0,
+};
+
+struct davinci_gpio_platform_data {
+ u32 ngpio;
+ u32 gpio_unbanked;
+ u32 intc_irq_num;
+};
+
+
+struct davinci_gpio_controller {
+ struct gpio_chip chip;
+ int irq_base;
+ /* Serialize access to GPIO registers */
+ spinlock_t lock;
+ void __iomem *regs;
+ void __iomem *set_data;
+ void __iomem *clr_data;
+ void __iomem *in_data;
+ int gpio_unbanked;
+ unsigned gpio_irq;
+};
+
+/*
+ * basic gpio routines
+ */
+#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */
+
+/* Convert GPIO signal to GPIO pin number */
+#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
+
+static inline u32 __gpio_mask(unsigned gpio)
+{
+ return 1 << (gpio % 32);
+}
+#endif
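The helpers at the end of this header translate (bank, offset) pairs into global GPIO numbers and register bit masks. A small usage sketch, with a made-up bank/offset pair:

    #include <linux/platform_data/gpio-davinci.h>

    /* Pin 3 of bank 2: banks are 16 pins wide, so this is GPIO 35 */
    #define BOARD_LED_GPIO  GPIO_TO_PIN(2, 3)

    static u32 board_led_mask(void)
    {
            /* Bit position within the 32-bit bank register: 35 % 32 == 3 */
            return __gpio_mask(BOARD_LED_GPIO);
    }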
diff --git a/include/linux/platform_data/gpio-em.h b/include/linux/platform_data/gpio-em.h
index 573edfb046c4..7c5a519d2dcd 100644
--- a/include/linux/platform_data/gpio-em.h
+++ b/include/linux/platform_data/gpio-em.h
@@ -5,6 +5,7 @@ struct gpio_em_config {
unsigned int gpio_base;
unsigned int irq_base;
unsigned int number_of_pins;
+ const char *pctl_name;
};
#endif /* __GPIO_EM_H__ */
diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h
new file mode 100644
index 000000000000..5ae0d9c80d4d
--- /dev/null
+++ b/include/linux/platform_data/gpio_backlight.h
@@ -0,0 +1,21 @@
+/*
+ * gpio_backlight.h - Simple GPIO-controlled backlight
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __GPIO_BACKLIGHT_H__
+#define __GPIO_BACKLIGHT_H__
+
+struct device;
+
+struct gpio_backlight_platform_data {
+ struct device *fbdev;
+ int gpio;
+ int def_value;
+ bool active_low;
+ const char *name;
+};
+
+#endif
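A board that wires its backlight to a single GPIO would pass platform data along these lines; the GPIO number and name are illustrative only:

    #include <linux/platform_data/gpio_backlight.h>

    static struct gpio_backlight_platform_data board_backlight_pdata = {
            .fbdev      = NULL,     /* optionally points at the framebuffer device */
            .gpio       = 42,
            .def_value  = 1,        /* start with the backlight switched on */
            .active_low = false,
            .name       = "backlight",
    };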
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
index 202e290faea8..624ff9edad6f 100644
--- a/include/linux/platform_data/leds-lp55xx.h
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -22,6 +22,7 @@
struct lp55xx_led_config {
const char *name;
+ const char *default_trigger;
u8 chan_nr;
u8 led_current; /* mA x10, 0 if led is not connected */
u8 max_current;
@@ -36,6 +37,13 @@ struct lp55xx_predef_pattern {
u8 size_b;
};
+enum lp8501_pwr_sel {
+ LP8501_ALL_VDD, /* D1~9 are connected to VDD */
+ LP8501_6VDD_3VOUT, /* D1~6 with VDD, D7~9 with VOUT */
+ LP8501_3VDD_6VOUT, /* D1~6 with VOUT, D7~9 with VDD */
+ LP8501_ALL_VOUT, /* D1~9 are connected to VOUT */
+};
+
/*
* struct lp55xx_platform_data
* @led_config : Configurable led class device
@@ -59,14 +67,15 @@ struct lp55xx_platform_data {
/* Clock configuration */
u8 clock_mode;
- /* Platform specific functions */
- int (*setup_resources)(void);
- void (*release_resources)(void);
- void (*enable)(bool state);
+ /* optional enable GPIO */
+ int enable_gpio;
/* Predefined pattern data */
struct lp55xx_predef_pattern *patterns;
unsigned int num_patterns;
+
+ /* LP8501 specific */
+ enum lp8501_pwr_sel pwr_sel;
};
#endif /* _LEDS_LP55XX_H */
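Taken together, the callback-based hooks are replaced by a plain enable GPIO and the LP8501 gains a power-selection field. A sketch of platform data using the new members; the led_config/num_channels fields come from the rest of this header rather than this hunk, and all values are illustrative:

    #include <linux/kernel.h>
    #include <linux/platform_data/leds-lp55xx.h>

    static struct lp55xx_led_config board_lp8501_leds[] = {
            {
                    .name            = "status",
                    .default_trigger = "heartbeat", /* new field */
                    .chan_nr         = 0,
                    .led_current     = 20,          /* 2.0 mA */
                    .max_current     = 40,          /* 4.0 mA */
            },
    };

    static struct lp55xx_platform_data board_lp8501_pdata = {
            .led_config   = board_lp8501_leds,
            .num_channels = ARRAY_SIZE(board_lp8501_leds),
            .enable_gpio  = 17,                     /* replaces the old callbacks */
            .pwr_sel      = LP8501_ALL_VDD,         /* LP8501 specific */
    };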
diff --git a/include/linux/platform_data/leds-pca9633.h b/include/linux/platform_data/leds-pca963x.h
index c5bf29b6fa7f..e731f0036329 100644
--- a/include/linux/platform_data/leds-pca9633.h
+++ b/include/linux/platform_data/leds-pca963x.h
@@ -1,7 +1,8 @@
/*
- * PCA9633 LED chip driver.
+ * PCA963X LED chip driver.
*
* Copyright 2012 bct electronic GmbH
+ * Copyright 2013 Qtechnology A/S
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -18,18 +19,24 @@
* 02110-1301 USA
*/
-#ifndef __LINUX_PCA9633_H
-#define __LINUX_PCA9633_H
+#ifndef __LINUX_PCA963X_H
+#define __LINUX_PCA963X_H
#include <linux/leds.h>
-enum pca9633_outdrv {
- PCA9633_OPEN_DRAIN,
- PCA9633_TOTEM_POLE, /* aka push-pull */
+enum pca963x_outdrv {
+ PCA963X_OPEN_DRAIN,
+ PCA963X_TOTEM_POLE, /* aka push-pull */
};
-struct pca9633_platform_data {
+enum pca963x_blink_type {
+ PCA963X_SW_BLINK,
+ PCA963X_HW_BLINK,
+};
+
+struct pca963x_platform_data {
struct led_platform_data leds;
- enum pca9633_outdrv outdrv;
+ enum pca963x_outdrv outdrv;
+ enum pca963x_blink_type blink_type;
};
-#endif /* __LINUX_PCA9633_H*/
+#endif /* __LINUX_PCA963X_H*/
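Beyond the rename, the platform data grows a blink_type selector. Illustrative platform data for a board with one LED; struct led_info and struct led_platform_data come from <linux/leds.h>, which this header already includes:

    #include <linux/kernel.h>
    #include <linux/leds.h>
    #include <linux/platform_data/leds-pca963x.h>

    static struct led_info board_pca963x_leds[] = {
            { .name = "pca963x:red" },
    };

    static struct pca963x_platform_data board_pca963x_pdata = {
            .leds = {
                    .num_leds = ARRAY_SIZE(board_pca963x_leds),
                    .leds     = board_pca963x_leds,
            },
            .outdrv     = PCA963X_TOTEM_POLE,
            .blink_type = PCA963X_HW_BLINK,
    };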
diff --git a/include/linux/platform_data/leds-pca9685.h b/include/linux/platform_data/leds-pca9685.h
new file mode 100644
index 000000000000..778e9e4249cc
--- /dev/null
+++ b/include/linux/platform_data/leds-pca9685.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2013 Maximilian Güntner <maximilian.guentner@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Based on leds-pca963x.h by Peter Meerwald <p.meerwald@bct-electronic.com>
+ *
+ * LED driver for the NXP PCA9685 PWM chip
+ *
+ */
+
+#ifndef __LINUX_PCA9685_H
+#define __LINUX_PCA9685_H
+
+#include <linux/leds.h>
+
+enum pca9685_outdrv {
+ PCA9685_OPEN_DRAIN,
+ PCA9685_TOTEM_POLE,
+};
+
+enum pca9685_inverted {
+ PCA9685_NOT_INVERTED,
+ PCA9685_INVERTED,
+};
+
+struct pca9685_platform_data {
+ struct led_platform_data leds;
+ enum pca9685_outdrv outdrv;
+ enum pca9685_inverted inverted;
+};
+
+#endif /* __LINUX_PCA9685_H */
diff --git a/include/linux/platform_data/leds-renesas-tpu.h b/include/linux/platform_data/leds-renesas-tpu.h
deleted file mode 100644
index 055387086fc1..000000000000
--- a/include/linux/platform_data/leds-renesas-tpu.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __LEDS_RENESAS_TPU_H__
-#define __LEDS_RENESAS_TPU_H__
-
-struct led_renesas_tpu_config {
- char *name;
- unsigned pin_gpio_fn;
- unsigned pin_gpio;
- unsigned int channel_offset;
- unsigned int timer_bit;
- unsigned int max_brightness;
- unsigned int refresh_rate;
-};
-
-#endif /* __LEDS_RENESAS_TPU_H__ */
diff --git a/include/linux/platform_data/lm3630_bl.h b/include/linux/platform_data/lm3630_bl.h
deleted file mode 100644
index 9176dd3f2d63..000000000000
--- a/include/linux/platform_data/lm3630_bl.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-* Simple driver for Texas Instruments LM3630 LED Flash driver chip
-* Copyright (C) 2012 Texas Instruments
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License version 2 as
-* published by the Free Software Foundation.
-*
-*/
-
-#ifndef __LINUX_LM3630_H
-#define __LINUX_LM3630_H
-
-#define LM3630_NAME "lm3630_bl"
-
-enum lm3630_pwm_ctrl {
- PWM_CTRL_DISABLE = 0,
- PWM_CTRL_BANK_A,
- PWM_CTRL_BANK_B,
- PWM_CTRL_BANK_ALL,
-};
-
-enum lm3630_pwm_active {
- PWM_ACTIVE_HIGH = 0,
- PWM_ACTIVE_LOW,
-};
-
-enum lm3630_bank_a_ctrl {
- BANK_A_CTRL_DISABLE = 0x0,
- BANK_A_CTRL_LED1 = 0x4,
- BANK_A_CTRL_LED2 = 0x1,
- BANK_A_CTRL_ALL = 0x5,
-};
-
-enum lm3630_bank_b_ctrl {
- BANK_B_CTRL_DISABLE = 0,
- BANK_B_CTRL_LED2,
-};
-
-struct lm3630_platform_data {
-
- /* maximum brightness */
- int max_brt_led1;
- int max_brt_led2;
-
- /* initial on brightness */
- int init_brt_led1;
- int init_brt_led2;
- enum lm3630_pwm_ctrl pwm_ctrl;
- enum lm3630_pwm_active pwm_active;
- enum lm3630_bank_a_ctrl bank_a_ctrl;
- enum lm3630_bank_b_ctrl bank_b_ctrl;
- unsigned int pwm_period;
- void (*pwm_set_intensity) (int brightness, int max_brightness);
-};
-
-#endif /* __LINUX_LM3630_H */
diff --git a/include/linux/platform_data/lm3630a_bl.h b/include/linux/platform_data/lm3630a_bl.h
new file mode 100644
index 000000000000..7538e38e270b
--- /dev/null
+++ b/include/linux/platform_data/lm3630a_bl.h
@@ -0,0 +1,65 @@
+/*
+* Simple driver for Texas Instruments LM3630A LED Flash driver chip
+* Copyright (C) 2012 Texas Instruments
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+*/
+
+#ifndef __LINUX_LM3630A_H
+#define __LINUX_LM3630A_H
+
+#define LM3630A_NAME "lm3630a_bl"
+
+enum lm3630a_pwm_ctrl {
+ LM3630A_PWM_DISABLE = 0x00,
+ LM3630A_PWM_BANK_A,
+ LM3630A_PWM_BANK_B,
+ LM3630A_PWM_BANK_ALL,
+ LM3630A_PWM_BANK_A_ACT_LOW = 0x05,
+ LM3630A_PWM_BANK_B_ACT_LOW,
+ LM3630A_PWM_BANK_ALL_ACT_LOW,
+};
+
+enum lm3630a_leda_ctrl {
+ LM3630A_LEDA_DISABLE = 0x00,
+ LM3630A_LEDA_ENABLE = 0x04,
+ LM3630A_LEDA_ENABLE_LINEAR = 0x14,
+};
+
+enum lm3630a_ledb_ctrl {
+ LM3630A_LEDB_DISABLE = 0x00,
+ LM3630A_LEDB_ON_A = 0x01,
+ LM3630A_LEDB_ENABLE = 0x02,
+ LM3630A_LEDB_ENABLE_LINEAR = 0x0A,
+};
+
+#define LM3630A_MAX_BRIGHTNESS 255
+/*
+ * @leda_init_brt : LED A initial brightness, 4~255
+ * @leda_max_brt  : LED A max brightness, 4~255
+ * @leda_ctrl     : LED A disable, enable linear, enable exponential
+ * @ledb_init_brt : LED B initial brightness, 4~255
+ * @ledb_max_brt  : LED B max brightness, 4~255
+ * @ledb_ctrl     : LED B disable, enable linear, enable exponential
+ * @pwm_period    : PWM period
+ * @pwm_ctrl      : PWM disable, bank A or B, active high or low
+ */
+struct lm3630a_platform_data {
+
+ /* led a config. */
+ int leda_init_brt;
+ int leda_max_brt;
+ enum lm3630a_leda_ctrl leda_ctrl;
+ /* led b config. */
+ int ledb_init_brt;
+ int ledb_max_brt;
+ enum lm3630a_ledb_ctrl ledb_ctrl;
+ /* pwm config. */
+ unsigned int pwm_period;
+ enum lm3630a_pwm_ctrl pwm_ctrl;
+};
+
+#endif /* __LINUX_LM3630A_H */
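Illustrative platform data for the new lm3630a driver; the comments above constrain the brightness values to 4~255, and the numbers below are placeholders:

    #include <linux/platform_data/lm3630a_bl.h>

    static struct lm3630a_platform_data board_lm3630a_pdata = {
            .leda_init_brt = 100,
            .leda_max_brt  = LM3630A_MAX_BRIGHTNESS,
            .leda_ctrl     = LM3630A_LEDA_ENABLE_LINEAR,
            .ledb_init_brt = 100,
            .ledb_max_brt  = LM3630A_MAX_BRIGHTNESS,
            .ledb_ctrl     = LM3630A_LEDB_ON_A,     /* LED B driven from bank A */
            .pwm_period    = 255,
            .pwm_ctrl      = LM3630A_PWM_DISABLE,
    };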
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index ea3200527dd3..1b2ba24e4e03 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -40,6 +40,17 @@
#define LP8553_PWM_CONFIG LP8550_PWM_CONFIG
#define LP8553_I2C_CONFIG LP8550_I2C_CONFIG
+/* CONFIG register - LP8555 */
+#define LP8555_PWM_STANDBY BIT(7)
+#define LP8555_PWM_FILTER BIT(6)
+#define LP8555_RELOAD_EPROM BIT(3) /* use it if EPROMs should be reset
+ when the backlight turns on */
+#define LP8555_OFF_OPENLEDS BIT(2)
+#define LP8555_PWM_CONFIG LP8555_PWM_ONLY
+#define LP8555_I2C_CONFIG LP8555_I2C_ONLY
+#define LP8555_COMB1_CONFIG LP8555_COMBINED1
+#define LP8555_COMB2_CONFIG LP8555_COMBINED2
+
/* DEVICE CONTROL register - LP8556 */
#define LP8556_PWM_CONFIG (LP8556_PWM_ONLY << BRT_MODE_SHFT)
#define LP8556_COMB1_CONFIG (LP8556_COMBINED1 << BRT_MODE_SHFT)
@@ -65,6 +76,7 @@ enum lp855x_chip_id {
LP8551,
LP8552,
LP8553,
+ LP8555,
LP8556,
LP8557,
};
@@ -89,6 +101,13 @@ enum lp8553_brighntess_source {
LP8553_I2C_ONLY = LP8550_I2C_ONLY,
};
+enum lp8555_brightness_source {
+ LP8555_PWM_ONLY,
+ LP8555_I2C_ONLY,
+ LP8555_COMBINED1, /* Brightness register with shaped PWM */
+ LP8555_COMBINED2, /* PWM with shaped brightness register */
+};
+
enum lp8556_brightness_source {
LP8556_PWM_ONLY,
LP8556_COMBINED1, /* pwm + i2c before the shaper block */
diff --git a/include/linux/platform_data/lv5207lp.h b/include/linux/platform_data/lv5207lp.h
new file mode 100644
index 000000000000..7dc4d9a219a6
--- /dev/null
+++ b/include/linux/platform_data/lv5207lp.h
@@ -0,0 +1,19 @@
+/*
+ * lv5207lp.h - Sanyo LV5207LP LEDs Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LV5207LP_H__
+#define __LV5207LP_H__
+
+struct device;
+
+struct lv5207lp_platform_data {
+ struct device *fbdev;
+ unsigned int max_value;
+ unsigned int def_value;
+};
+
+#endif
diff --git a/include/linux/platform_data/mipi-csis.h b/include/linux/platform_data/mipi-csis.h
index bf34e17cee7f..c2fd9024717c 100644
--- a/include/linux/platform_data/mipi-csis.h
+++ b/include/linux/platform_data/mipi-csis.h
@@ -25,13 +25,4 @@ struct s5p_platform_mipi_csis {
u8 hs_settle;
};
-/**
- * s5p_csis_phy_enable - global MIPI-CSI receiver D-PHY control
- * @id: MIPI-CSIS harware instance index (0...1)
- * @on: true to enable D-PHY and deassert its reset
- * false to disable D-PHY
- * @return: 0 on success, or negative error code on failure
- */
-int s5p_csis_phy_enable(int id, bool on);
-
#endif /* __PLAT_SAMSUNG_MIPI_CSIS_H_ */
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index d44912d81578..75f70f6ac137 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -10,6 +10,8 @@
#ifndef __ASM_ARCH_IMX_ESDHC_H
#define __ASM_ARCH_IMX_ESDHC_H
+#include <linux/types.h>
+
enum wp_types {
ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
@@ -32,6 +34,7 @@ enum cd_types {
* @cd_gpio: gpio for card_detect interrupt
* @wp_type: type of write_protect method (see wp_types enum above)
* @cd_type: type of card_detect method (see cd_types enum above)
+ * @support_vsel: indicates that 1.8V signal voltage switching is supported
*/
struct esdhc_platform_data {
@@ -41,5 +44,7 @@ struct esdhc_platform_data {
enum cd_types cd_type;
int max_bus_width;
unsigned int f_max;
+ bool support_vsel;
+ unsigned int delay_line;
};
#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 6bf9ef43ddb1..4da5bfa2147f 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -23,13 +23,16 @@ enum nand_io {
};
enum omap_ecc {
- /* 1-bit ecc: stored at end of spare area */
- OMAP_ECC_HAMMING_CODE_DEFAULT = 0, /* Default, s/w method */
- OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */
- /* 1-bit ecc: stored at beginning of spare area as romcode */
- OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */
- OMAP_ECC_BCH4_CODE_HW, /* 4-bit BCH ecc code */
- OMAP_ECC_BCH8_CODE_HW, /* 8-bit BCH ecc code */
+ /* 1-bit ECC calculation by GPMC, Error detection by Software */
+ OMAP_ECC_HAM1_CODE_HW = 0,
+ /* 4-bit ECC calculation by GPMC, Error detection by Software */
+ OMAP_ECC_BCH4_CODE_HW_DETECTION_SW,
+ /* 4-bit ECC calculation by GPMC, Error detection by ELM */
+ OMAP_ECC_BCH4_CODE_HW,
+ /* 8-bit ECC calculation by GPMC, Error detection by Software */
+ OMAP_ECC_BCH8_CODE_HW_DETECTION_SW,
+ /* 8-bit ECC calculation by GPMC, Error detection by ELM */
+ OMAP_ECC_BCH8_CODE_HW,
};
struct gpmc_nand_regs {
@@ -63,5 +66,6 @@ struct omap_nand_platform_data {
/* for passing the partitions */
struct device_node *of_node;
+ struct device_node *elm_of_node;
};
#endif
diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h
index c42f39f20195..ffb801998e5d 100644
--- a/include/linux/platform_data/mtd-nand-pxa3xx.h
+++ b/include/linux/platform_data/mtd-nand-pxa3xx.h
@@ -16,19 +16,6 @@ struct pxa3xx_nand_timing {
unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
};
-struct pxa3xx_nand_cmdset {
- uint16_t read1;
- uint16_t read2;
- uint16_t program;
- uint16_t read_status;
- uint16_t read_id;
- uint16_t erase;
- uint16_t reset;
- uint16_t lock;
- uint16_t unlock;
- uint16_t lock_status;
-};
-
struct pxa3xx_nand_flash {
char *name;
uint32_t chip_id;
diff --git a/include/linux/i2c/pca953x.h b/include/linux/platform_data/pca953x.h
index 3c98dd4f901f..3c98dd4f901f 100644
--- a/include/linux/i2c/pca953x.h
+++ b/include/linux/platform_data/pca953x.h
diff --git a/include/linux/platform_data/pinctrl-adi2.h b/include/linux/platform_data/pinctrl-adi2.h
new file mode 100644
index 000000000000..8f91300617ec
--- /dev/null
+++ b/include/linux/platform_data/pinctrl-adi2.h
@@ -0,0 +1,40 @@
+/*
+ * Pinctrl Driver for ADI GPIO2 controller
+ *
+ * Copyright 2007-2013 Analog Devices Inc.
+ *
+ * Licensed under the GPLv2 or later
+ */
+
+
+#ifndef PINCTRL_ADI2_H
+#define PINCTRL_ADI2_H
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+/**
+ * struct adi_pinctrl_gpio_platform_data - Pinctrl gpio platform data
+ * for ADI GPIO2 device.
+ *
+ * @port_gpio_base: Optional global GPIO index of the GPIO bank.
+ * 0 means driver decides.
+ * @port_pin_base: Pin index of the pin controller device.
+ * @port_width: Number of pins in the GPIO bank device
+ * @pint_id: GPIO PINT device id that this GPIO bank should map to.
+ * @pint_assign: The 32-bit GPIO PINT registers can be divided into 2 parts. A
+ * GPIO bank can be mapped into either low 16 bits[0] or high 16
+ * bits[1] of each PINT register.
+ * @pint_map: GPIO bank mapping code in the PINT device
+ */
+struct adi_pinctrl_gpio_platform_data {
+ unsigned int port_gpio_base;
+ unsigned int port_pin_base;
+ unsigned int port_width;
+ u8 pinctrl_id;
+ u8 pint_id;
+ bool pint_assign;
+ u8 pint_map;
+};
+
+#endif
diff --git a/include/linux/platform_data/pinctrl-single.h b/include/linux/platform_data/pinctrl-single.h
new file mode 100644
index 000000000000..72eacda9b360
--- /dev/null
+++ b/include/linux/platform_data/pinctrl-single.h
@@ -0,0 +1,12 @@
+/**
+ * struct pcs_pdata - pinctrl-single platform data
+ * @irq: optional wake-up interrupt
+ * @rearm: optional SoC-specific rearm function
+ *
+ * Note that the irq and rearm setup should come from the device
+ * tree, except for omap where there are still some dependencies
+ * on the legacy PRM code.
+ */
+struct pcs_pdata {
+ int irq;
+ void (*rearm)(void);
+};
diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
deleted file mode 100644
index 5f28cae18582..000000000000
--- a/include/linux/platform_data/usb-ehci-s5p.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2011 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __PLAT_SAMSUNG_EHCI_H
-#define __PLAT_SAMSUNG_EHCI_H __FILE__
-
-struct s5p_ehci_platdata {
- int (*phy_init)(struct platform_device *pdev, int type);
- int (*phy_exit)(struct platform_device *pdev, int type);
-};
-
-extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
-
-#endif /* __PLAT_SAMSUNG_EHCI_H */
diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
deleted file mode 100644
index c256c595be5e..000000000000
--- a/include/linux/platform_data/usb-ohci-exynos.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2011 Samsung Electronics Co.Ltd
- * http://www.samsung.com/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef __MACH_EXYNOS_OHCI_H
-#define __MACH_EXYNOS_OHCI_H
-
-struct exynos4_ohci_platdata {
- int (*phy_init)(struct platform_device *pdev, int type);
- int (*phy_exit)(struct platform_device *pdev, int type);
-};
-
-extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
-
-#endif /* __MACH_EXYNOS_OHCI_H */
diff --git a/include/linux/platform_data/usb-rcar-gen2-phy.h b/include/linux/platform_data/usb-rcar-gen2-phy.h
new file mode 100644
index 000000000000..dd3ba46c0d90
--- /dev/null
+++ b/include/linux/platform_data/usb-rcar-gen2-phy.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __USB_RCAR_GEN2_PHY_H
+#define __USB_RCAR_GEN2_PHY_H
+
+#include <linux/types.h>
+
+struct rcar_gen2_phy_platform_data {
+ /* USB channel 0 configuration */
+ bool chan0_pci:1; /* true: PCI USB host 0, false: USBHS */
+ /* USB channel 2 configuration */
+ bool chan2_pci:1; /* true: PCI USB host 2, false: USBSS */
+};
+
+#endif
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
new file mode 100644
index 000000000000..0472ab2f6ede
--- /dev/null
+++ b/include/linux/platform_data/zforce_ts.h
@@ -0,0 +1,26 @@
+/* drivers/input/touchscreen/zforce.c
+ *
+ * Copyright (C) 2012-2013 MundoReader S.L.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_INPUT_ZFORCE_TS_H
+#define _LINUX_INPUT_ZFORCE_TS_H
+
+struct zforce_ts_platdata {
+ int gpio_int;
+ int gpio_rst;
+
+ unsigned int x_max;
+ unsigned int y_max;
+};
+
+#endif /* _LINUX_INPUT_ZFORCE_TS_H */
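A board file would describe the controller's wiring and panel size roughly like this; the GPIO numbers and resolution are placeholders:

    #include <linux/platform_data/zforce_ts.h>

    static struct zforce_ts_platdata board_zforce_pdata = {
            .gpio_int = 52,
            .gpio_rst = 53,
            .x_max    = 800,
            .y_max    = 600,
    };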
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index ce8e4ffd78c7..16f6654082dd 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -178,6 +178,7 @@ struct platform_driver {
int (*resume)(struct platform_device *);
struct device_driver driver;
const struct platform_device_id *id_table;
+ bool prevent_deferred_probe;
};
#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
new file mode 100644
index 000000000000..5151b0059585
--- /dev/null
+++ b/include/linux/pm_opp.h
@@ -0,0 +1,139 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_OPP_H__
+#define __LINUX_OPP_H__
+
+#include <linux/err.h>
+#include <linux/cpufreq.h>
+#include <linux/notifier.h>
+
+struct dev_pm_opp;
+struct device;
+
+enum dev_pm_opp_event {
+ OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+};
+
+#if defined(CONFIG_PM_OPP)
+
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
+
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+
+int dev_pm_opp_get_opp_count(struct device *dev);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq,
+ bool available);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq);
+
+int dev_pm_opp_add(struct device *dev, unsigned long freq,
+ unsigned long u_volt);
+
+int dev_pm_opp_enable(struct device *dev, unsigned long freq);
+
+int dev_pm_opp_disable(struct device *dev, unsigned long freq);
+
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
+#else
+static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
+static inline int dev_pm_opp_get_opp_count(struct device *dev)
+{
+ return 0;
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq, bool available)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
+ unsigned long u_volt)
+{
+ return -EINVAL;
+}
+
+static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
+{
+ return 0;
+}
+
+static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
+{
+ return 0;
+}
+
+static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
+ struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_PM_OPP */
+
+#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+ return -EINVAL;
+}
+#endif
+
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table);
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table);
+#else
+static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ return -EINVAL;
+}
+
+static inline
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+}
+#endif /* CONFIG_CPU_FREQ */
+
+#endif /* __LINUX_OPP_H__ */
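Sketch of a consumer of the renamed dev_pm_opp_* interface, registering one operating point and looking it up again. Error handling is trimmed and the OPP core's locking rules are not visible in this header, so treat this strictly as an illustration with made-up frequency and voltage values:

    #include <linux/err.h>
    #include <linux/device.h>
    #include <linux/pm_opp.h>

    static int board_setup_opp(struct device *dev)
    {
            struct dev_pm_opp *opp;
            unsigned long freq = 800000000;         /* 800 MHz, illustrative */
            int ret;

            ret = dev_pm_opp_add(dev, freq, 1100000);       /* 1.1 V */
            if (ret)
                    return ret;

            opp = dev_pm_opp_find_freq_ceil(dev, &freq);
            if (IS_ERR(opp))
                    return PTR_ERR(opp);

            dev_info(dev, "OPP: %lu Hz at %lu uV\n",
                     dev_pm_opp_get_freq(opp), dev_pm_opp_get_voltage(opp));
            return 0;
    }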
diff --git a/include/linux/power/bq24190_charger.h b/include/linux/power/bq24190_charger.h
new file mode 100644
index 000000000000..9f0283721cbc
--- /dev/null
+++ b/include/linux/power/bq24190_charger.h
@@ -0,0 +1,16 @@
+/*
+ * Platform data for the TI bq24190 battery charger driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _BQ24190_CHARGER_H_
+#define _BQ24190_CHARGER_H_
+
+struct bq24190_platform_data {
+ unsigned int gpio_int; /* GPIO pin that's connected to INT# */
+};
+
+#endif
diff --git a/include/linux/irqchip/bcm2835.h b/include/linux/power/bq24735-charger.h
index 48a859bc9dca..f536164a6069 100644
--- a/include/linux/irqchip/bcm2835.h
+++ b/include/linux/power/bq24735-charger.h
@@ -1,5 +1,4 @@
/*
- * Copyright (C) 2010 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -16,14 +15,25 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef __LINUX_IRQCHIP_BCM2835_H_
-#define __LINUX_IRQCHIP_BCM2835_H_
+#ifndef __CHARGER_BQ24735_H_
+#define __CHARGER_BQ24735_H_
-#include <asm/exception.h>
+#include <linux/types.h>
+#include <linux/power_supply.h>
-extern void bcm2835_init_irq(void);
+struct bq24735_platform {
+ uint32_t charge_current;
+ uint32_t charge_voltage;
+ uint32_t input_current;
-extern asmlinkage void __exception_irq_entry bcm2835_handle_irq(
- struct pt_regs *regs);
+ const char *name;
-#endif
+ int status_gpio;
+ int status_gpio_active_low;
+ bool status_gpio_valid;
+
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+#endif /* __CHARGER_BQ24735_H_ */
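Illustrative platform data for the relocated bq24735 header; the numeric values below are placeholders rather than recommendations for any particular battery pack:

    #include <linux/power/bq24735-charger.h>

    static struct bq24735_platform board_bq24735_pdata = {
            .charge_current         = 1600,
            .charge_voltage         = 12000,
            .input_current          = 3000,
            .name                   = "main-battery",
            .status_gpio            = 29,
            .status_gpio_active_low = 1,
            .status_gpio_valid      = true,
    };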
diff --git a/include/linux/power/twl4030_madc_battery.h b/include/linux/power/twl4030_madc_battery.h
new file mode 100644
index 000000000000..23110dc77726
--- /dev/null
+++ b/include/linux/power/twl4030_madc_battery.h
@@ -0,0 +1,39 @@
+/*
+ * Dumb driver for LiIon batteries using TWL4030 madc.
+ *
+ * Copyright 2013 Golden Delicious Computers
+ * Nikolaus Schaller <hns@goldelico.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TWL4030_MADC_BATTERY_H
+#define __TWL4030_MADC_BATTERY_H
+
+/*
+ * Usually we can assume 100% @ 4.15V and 0% @ 3.3V but curves differ for
+ * charging and discharging!
+ */
+
+struct twl4030_madc_bat_calibration {
+ short voltage; /* in mV - specify -1 for end of list */
+ short level; /* in percent (0 .. 100%) */
+};
+
+struct twl4030_madc_bat_platform_data {
+ unsigned int capacity; /* total capacity in uAh */
+ struct twl4030_madc_bat_calibration *charging;
+ int charging_size;
+ struct twl4030_madc_bat_calibration *discharging;
+ int discharging_size;
+};
+
+#endif
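The calibration arrays are terminated with a -1 voltage entry, as the structure comment says, and separate curves are given for charging and discharging. A sketch with made-up curve points:

    #include <linux/kernel.h>
    #include <linux/power/twl4030_madc_battery.h>

    static struct twl4030_madc_bat_calibration board_charging[] = {
            { .voltage = 4150, .level = 100 },
            { .voltage = 3900, .level = 60 },
            { .voltage = 3300, .level = 0 },
            { .voltage = -1, .level = 0 },          /* end of list */
    };

    static struct twl4030_madc_bat_calibration board_discharging[] = {
            { .voltage = 4100, .level = 100 },
            { .voltage = 3750, .level = 50 },
            { .voltage = 3300, .level = 0 },
            { .voltage = -1, .level = 0 },          /* end of list */
    };

    static struct twl4030_madc_bat_platform_data board_battery_pdata = {
            .capacity         = 1200000,            /* 1200 mAh, in uAh */
            .charging         = board_charging,
            .charging_size    = ARRAY_SIZE(board_charging),
            .discharging      = board_discharging,
            .discharging_size = ARRAY_SIZE(board_discharging),
    };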
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 804b90643a85..5c2600630dc9 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -15,6 +15,7 @@
#include <linux/workqueue.h>
#include <linux/leds.h>
+#include <linux/spinlock.h>
struct device;
@@ -194,6 +195,8 @@ struct power_supply {
/* private */
struct device *dev;
struct work_struct changed_work;
+ spinlock_t changed_lock;
+ bool changed;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
struct thermal_cooling_device *tcd;
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
new file mode 100644
index 000000000000..4e250417ee30
--- /dev/null
+++ b/include/linux/powercap.h
@@ -0,0 +1,325 @@
+/*
+ * powercap.h: Data types and headers for sysfs power capping interface
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#ifndef __POWERCAP_H__
+#define __POWERCAP_H__
+
+#include <linux/device.h>
+#include <linux/idr.h>
+
+/*
+ * A power cap class device can contain multiple powercap control_types.
+ * Each control_type can have multiple power zones, which can be independently
+ * controlled. Each power zone can have one or more constraints.
+ */
+
+struct powercap_control_type;
+struct powercap_zone;
+struct powercap_zone_constraint;
+
+/**
+ * struct powercap_control_type_ops - Define control type callbacks
+ * @set_enable: Enable/Disable whole control type.
+ * Default is enabled. But this callback allows all zones
+ * to be put into a disabled state with any applied power
+ * limits removed. If disabled, a power zone can only be
+ * monitored, not controlled.
+ * @get_enable: get Enable/Disable status.
+ * @release: Callback to inform that the last reference to this
+ * control type is closed, so it is safe to free the data
+ * structure associated with this control type.
+ * This callback is mandatory if the client owns the memory
+ * for the control type.
+ *
+ * This structure defines control type callbacks to be implemented by client
+ * drivers
+ */
+struct powercap_control_type_ops {
+ int (*set_enable) (struct powercap_control_type *, bool mode);
+ int (*get_enable) (struct powercap_control_type *, bool *mode);
+ int (*release) (struct powercap_control_type *);
+};
+
+/**
+ * struct powercap_control_type - Defines a powercap control_type
+ * @name: name of control_type
+ * @dev: device for this control_type
+ * @idr: idr to have unique id for its child
+ * @root_node: Root holding power zones for this control_type
+ * @ops: Pointer to callback struct
+ * @node_lock: mutex for control type
+ * @allocated: It is possible that the client owns the memory
+ * used by this structure. In this case
+ * this flag is set to false by the framework to
+ * prevent deallocation during the release process.
+ * Otherwise this flag is set to true.
+ * @ctrl_inst: link to the control_type list
+ *
+ * Defines powercap control_type. This acts as a container for power
+ * zones, which use the same method to control power, e.g. RAPL, RAPL-PCI etc.
+ * All fields are private and should not be used by client drivers.
+ */
+struct powercap_control_type {
+ struct device dev;
+ struct idr idr;
+ int nr_zones;
+ const struct powercap_control_type_ops *ops;
+ struct mutex lock;
+ bool allocated;
+ struct list_head node;
+};
+
+/**
+ * struct powercap_zone_ops - Define power zone callbacks
+ * @get_max_energy_range_uj: Get maximum range of energy counter in
+ * micro-joules.
+ * @get_energy_uj: Get current energy counter in micro-joules.
+ * @reset_energy_uj: Reset micro-joules energy counter.
+ * @get_max_power_range_uw: Get maximum range of power counter in
+ * micro-watts.
+ * @get_power_uw: Get current power counter in micro-watts.
+ * @set_enable: Enable/Disable power zone controls.
+ * Default is enabled.
+ * @get_enable: get Enable/Disable status.
+ * @release: Callback to inform that the last reference to this
+ * power zone is closed, so it is safe to free the
+ * data structure associated with this
+ * power zone. Mandatory, if the client driver owns
+ * the power_zone memory.
+ *
+ * This structure defines zone callbacks to be implemented by client drivers.
+ * Client drivers can define both energy and power related callbacks, but at
+ * least one type (either power or energy) is mandatory. Client drivers
+ * should handle mutual exclusion, if required in callbacks.
+ */
+struct powercap_zone_ops {
+ int (*get_max_energy_range_uj) (struct powercap_zone *, u64 *);
+ int (*get_energy_uj) (struct powercap_zone *, u64 *);
+ int (*reset_energy_uj) (struct powercap_zone *);
+ int (*get_max_power_range_uw) (struct powercap_zone *, u64 *);
+ int (*get_power_uw) (struct powercap_zone *, u64 *);
+ int (*set_enable) (struct powercap_zone *, bool mode);
+ int (*get_enable) (struct powercap_zone *, bool *mode);
+ int (*release) (struct powercap_zone *);
+};
+
+#define POWERCAP_ZONE_MAX_ATTRS 6
+#define POWERCAP_CONSTRAINTS_ATTRS 8
+#define MAX_CONSTRAINTS_PER_ZONE 10
+/**
+ * struct powercap_zone - Defines an instance of a power cap zone
+ * @id: Unique id
+ * @name: Power zone name.
+ * @control_type_inst: Control type instance for this zone.
+ * @ops: Pointer to the zone operation structure.
+ * @dev: Instance of a device.
+ * @const_id_cnt: Number of constraints defined.
+ * @idr: Instance to an idr entry for children zones.
+ * @parent_idr: To remove reference from the parent idr.
+ * @private_data: Private data pointer if any for this zone.
+ * @zone_dev_attrs: Attributes associated with this device.
+ * @zone_attr_count: Attribute count.
+ * @dev_zone_attr_group: Attribute group for attributes.
+ * @dev_attr_groups: Attribute group store to register with device.
+ * @allocated: It is possible that the client owns the memory
+ * used by this structure. In this case
+ * this flag is set to false by the framework to
+ * prevent deallocation during the release process.
+ * Otherwise this flag is set to true.
+ * @constraint_ptr: List of constraints for this zone.
+ *
+ * This defines a power zone instance. The fields of this structure are
+ * private, and should not be used by client drivers.
+ */
+struct powercap_zone {
+ int id;
+ char *name;
+ void *control_type_inst;
+ const struct powercap_zone_ops *ops;
+ struct device dev;
+ int const_id_cnt;
+ struct idr idr;
+ struct idr *parent_idr;
+ void *private_data;
+ struct attribute **zone_dev_attrs;
+ int zone_attr_count;
+ struct attribute_group dev_zone_attr_group;
+ const struct attribute_group *dev_attr_groups[2]; /* 1 group + NULL */
+ bool allocated;
+ struct powercap_zone_constraint *constraints;
+};
+
+/**
+ * struct powercap_zone_constraint_ops - Define constraint callbacks
+ * @set_power_limit_uw: Set power limit in micro-watts.
+ * @get_power_limit_uw: Get power limit in micro-watts.
+ * @set_time_window_us: Set time window in micro-seconds.
+ * @get_time_window_us: Get time window in micro-seconds.
+ * @get_max_power_uw: Get max power allowed in micro-watts.
+ * @get_min_power_uw: Get min power allowed in micro-watts.
+ * @get_max_time_window_us: Get max time window allowed in micro-seconds.
+ * @get_min_time_window_us: Get min time window allowed in micro-seconds.
+ * @get_name: Get the name of constraint
+ *
+ * This structure is used to define the constraint callbacks for the client
+ * drivers. The following callbacks are mandatory and can't be NULL:
+ * set_power_limit_uw
+ * get_power_limit_uw
+ * set_time_window_us
+ * get_time_window_us
+ * get_name
+ * Client drivers should handle mutual exclusion, if required in callbacks.
+ */
+struct powercap_zone_constraint_ops {
+ int (*set_power_limit_uw) (struct powercap_zone *, int, u64);
+ int (*get_power_limit_uw) (struct powercap_zone *, int, u64 *);
+ int (*set_time_window_us) (struct powercap_zone *, int, u64);
+ int (*get_time_window_us) (struct powercap_zone *, int, u64 *);
+ int (*get_max_power_uw) (struct powercap_zone *, int, u64 *);
+ int (*get_min_power_uw) (struct powercap_zone *, int, u64 *);
+ int (*get_max_time_window_us) (struct powercap_zone *, int, u64 *);
+ int (*get_min_time_window_us) (struct powercap_zone *, int, u64 *);
+ const char *(*get_name) (struct powercap_zone *, int);
+};
+
+/**
+ * struct powercap_zone_constraint- Defines instance of a constraint
+ * @id: Instance Id of this constraint.
+ * @power_zone: Pointer to the power zone for this constraint.
+ * @ops: Pointer to the constraint callbacks.
+ *
+ * This defines a constraint instance.
+ */
+struct powercap_zone_constraint {
+ int id;
+ struct powercap_zone *power_zone;
+ struct powercap_zone_constraint_ops *ops;
+};
+
+
+/* For clients to get their device pointer, may be used for dev_dbgs */
+#define POWERCAP_GET_DEV(power_zone) (&power_zone->dev)
+
+/**
+* powercap_set_zone_data() - Set private data for a zone
+* @power_zone: A pointer to the valid zone instance.
+* @pdata: A pointer to the user private data.
+*
+* Allows client drivers to associate some private data with a zone instance.
+*/
+static inline void powercap_set_zone_data(struct powercap_zone *power_zone,
+ void *pdata)
+{
+ if (power_zone)
+ power_zone->private_data = pdata;
+}
+
+/**
+* powercap_get_zone_data() - Get private data for a zone
+* @power_zone: A pointer to the valid zone instance.
+*
+* Allows client drivers to get the private data associated with a zone,
+* previously set using a call to powercap_set_zone_data.
+*/
+static inline void *powercap_get_zone_data(struct powercap_zone *power_zone)
+{
+ if (power_zone)
+ return power_zone->private_data;
+ return NULL;
+}
+
+/**
+* powercap_register_control_type() - Register a control_type with framework
+* @control_type: Pointer to client allocated memory for the control type
+* structure storage. If this is NULL, powercap framework
+* will allocate memory and own it.
+* Advantage of this parameter is that client can embed
+* this data in its data structures and allocate in a
+* single call, preventing multiple allocations.
+* @control_type_name: The name of this control_type, which will be shown
+* in the sysfs interface.
+* @ops: Callbacks for control type. This parameter is optional.
+*
+* Used to create a control_type with the power capping class. Here control_type
+* can represent a type of technology, which can control a range of power zones.
+* For example a control_type can be RAPL (Running Average Power Limit) on
+* Intel® 64 and IA-32 processor architectures. The name can be any string,
+* but it must be unique; otherwise this function returns NULL.
+* A pointer to the control_type instance is returned on success.
+*/
+struct powercap_control_type *powercap_register_control_type(
+ struct powercap_control_type *control_type,
+ const char *name,
+ const struct powercap_control_type_ops *ops);
+
+/**
+* powercap_unregister_control_type() - Unregister a control_type from framework
+* @instance: A pointer to the valid control_type instance.
+*
+* Used to unregister a control_type with the power capping class.
+* All power zones registered under this control type have to be unregistered
+* before calling this function, or it will fail with an error code.
+*/
+int powercap_unregister_control_type(struct powercap_control_type *instance);
+
+/* Zone register/unregister API */
+
+/**
+* powercap_register_zone() - Register a power zone
+* @power_zone: Pointer to client allocated memory for the power zone structure
+* storage. If this is NULL, powercap framework will allocate
+* memory and own it. Advantage of this parameter is that client
+* can embed this data in its data structures and allocate in a
+* single call, preventing multiple allocations.
+* @control_type: A control_type instance under which this zone operates.
+* @name: A name for this zone.
+* @parent: A pointer to the parent power zone instance if any or NULL
+* @ops: Pointer to zone operation callback structure.
+* @no_constraints: Number of constraints for this zone
+* @const_ops: Pointer to constraint callback structure
+*
+* Register a power zone under a given control type. A power zone must register
+* a pointer to a structure representing zone callbacks.
+* A power zone can be located under a parent power zone, in which case @parent
+* should point to it. Otherwise, if @parent is NULL, the new power zone will
+* be located directly under the given control type.
+* For each power zone there may be a number of constraints that appear in the
+* sysfs under that zone as attributes with unique numeric IDs.
+* Returns pointer to the power_zone on success.
+*/
+struct powercap_zone *powercap_register_zone(
+ struct powercap_zone *power_zone,
+ struct powercap_control_type *control_type,
+ const char *name,
+ struct powercap_zone *parent,
+ const struct powercap_zone_ops *ops,
+ int nr_constraints,
+ struct powercap_zone_constraint_ops *const_ops);
+
+/**
+* powercap_unregister_zone() - Unregister a zone device
+* @control_type: A pointer to the valid instance of a control_type.
+* @power_zone: A pointer to the valid zone instance for a control_type
+*
+* Used to unregister a zone device for a control_type. Caller should
+* make sure that children for this zone are unregistered first.
+*/
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+ struct powercap_zone *power_zone);
+
+#endif
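Minimal sketch of a client registering one control type with a single zone and one constraint, using the framework-owned allocation path (NULL first arguments). The ops structures are assumed to be filled in elsewhere with the mandatory callbacks listed above, and the error checks cover both the NULL and ERR_PTR conventions since only the NULL case is documented here:

    #include <linux/err.h>
    #include <linux/powercap.h>

    static const struct powercap_control_type_ops my_ct_ops;       /* callbacks elsewhere */
    static const struct powercap_zone_ops my_zone_ops;             /* callbacks elsewhere */
    static struct powercap_zone_constraint_ops my_constraint_ops;  /* callbacks elsewhere */

    static int my_powercap_register(void)
    {
            struct powercap_control_type *ct;
            struct powercap_zone *zone;

            ct = powercap_register_control_type(NULL, "my-control", &my_ct_ops);
            if (IS_ERR_OR_NULL(ct))
                    return ct ? PTR_ERR(ct) : -EINVAL;

            zone = powercap_register_zone(NULL, ct, "zone0", NULL,
                                          &my_zone_ops, 1, &my_constraint_ops);
            if (IS_ERR_OR_NULL(zone)) {
                    powercap_unregister_control_type(ct);
                    return zone ? PTR_ERR(zone) : -EINVAL;
            }

            return 0;
    }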
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index f5d4723cdb3d..a3d9dc8c2c00 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -6,106 +6,95 @@
* preempt_count (used for kernel preemption, interrupt count, etc.)
*/
-#include <linux/thread_info.h>
#include <linux/linkage.h>
#include <linux/list.h>
-#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
- extern void add_preempt_count(int val);
- extern void sub_preempt_count(int val);
-#else
-# define add_preempt_count(val) do { preempt_count() += (val); } while (0)
-# define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
-#endif
-
-#define inc_preempt_count() add_preempt_count(1)
-#define dec_preempt_count() sub_preempt_count(1)
-
-#define preempt_count() (current_thread_info()->preempt_count)
-
-#ifdef CONFIG_PREEMPT
-
-asmlinkage void preempt_schedule(void);
-
-#define preempt_check_resched() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule(); \
-} while (0)
-
-#ifdef CONFIG_CONTEXT_TRACKING
+/*
+ * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
+ * the other bits -- can't include that header due to inclusion hell.
+ */
+#define PREEMPT_NEED_RESCHED 0x80000000
-void preempt_schedule_context(void);
+#include <asm/preempt.h>
-#define preempt_check_resched_context() \
-do { \
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
- preempt_schedule_context(); \
-} while (0)
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+extern void preempt_count_add(int val);
+extern void preempt_count_sub(int val);
+#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
+#define preempt_count_add(val) __preempt_count_add(val)
+#define preempt_count_sub(val) __preempt_count_sub(val)
+#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
+#endif
-#define preempt_check_resched_context() preempt_check_resched()
-
-#endif /* CONFIG_CONTEXT_TRACKING */
-
-#else /* !CONFIG_PREEMPT */
-
-#define preempt_check_resched() do { } while (0)
-#define preempt_check_resched_context() do { } while (0)
-
-#endif /* CONFIG_PREEMPT */
+#define __preempt_count_inc() __preempt_count_add(1)
+#define __preempt_count_dec() __preempt_count_sub(1)
+#define preempt_count_inc() preempt_count_add(1)
+#define preempt_count_dec() preempt_count_sub(1)
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
do { \
- inc_preempt_count(); \
+ preempt_count_inc(); \
barrier(); \
} while (0)
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
- dec_preempt_count(); \
+ preempt_count_dec(); \
} while (0)
-#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
- preempt_enable_no_resched(); \
barrier(); \
- preempt_check_resched(); \
+ if (unlikely(preempt_count_dec_and_test())) \
+ __preempt_schedule(); \
+} while (0)
+
+#define preempt_check_resched() \
+do { \
+ if (should_resched()) \
+ __preempt_schedule(); \
} while (0)
-/* For debugging and tracer internals only! */
-#define add_preempt_count_notrace(val) \
- do { preempt_count() += (val); } while (0)
-#define sub_preempt_count_notrace(val) \
- do { preempt_count() -= (val); } while (0)
-#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
-#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)
+#else
+#define preempt_enable() preempt_enable_no_resched()
+#define preempt_check_resched() do { } while (0)
+#endif
#define preempt_disable_notrace() \
do { \
- inc_preempt_count_notrace(); \
+ __preempt_count_inc(); \
barrier(); \
} while (0)
#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
- dec_preempt_count_notrace(); \
+ __preempt_count_dec(); \
} while (0)
-/* preempt_check_resched is OK to trace */
+#ifdef CONFIG_PREEMPT
+
+#ifndef CONFIG_CONTEXT_TRACKING
+#define __preempt_schedule_context() __preempt_schedule()
+#endif
+
#define preempt_enable_notrace() \
do { \
- preempt_enable_no_resched_notrace(); \
barrier(); \
- preempt_check_resched_context(); \
+ if (unlikely(__preempt_count_dec_and_test())) \
+ __preempt_schedule_context(); \
} while (0)
+#else
+#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
+#endif
#else /* !CONFIG_PREEMPT_COUNT */
@@ -115,10 +104,11 @@ do { \
* that can cause faults and scheduling migrate into our preempt-protected
* region.
*/
-#define preempt_disable() barrier()
+#define preempt_disable() barrier()
#define sched_preempt_enable_no_resched() barrier()
-#define preempt_enable_no_resched() barrier()
-#define preempt_enable() barrier()
+#define preempt_enable_no_resched() barrier()
+#define preempt_enable() barrier()
+#define preempt_check_resched() do { } while (0)
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
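Callers are unaffected by this rework; a preempt-protected per-CPU update still reads the same, only the macros now expand to the per-architecture preempt_count helpers:

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(unsigned long, my_event_count);

    static void my_count_event(void)
    {
            preempt_disable();              /* preempt_count_inc() + barrier() */
            __this_cpu_inc(my_event_count);
            preempt_enable();               /* may call __preempt_schedule() */
    }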
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
index 931bc616219f..d169820203dd 100644
--- a/include/linux/preempt_mask.h
+++ b/include/linux/preempt_mask.h
@@ -11,36 +11,23 @@
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
- * The hardirq count can in theory reach the same as NR_IRQS.
- * In reality, the number of nested IRQS is limited to the stack
- * size as well. For archs with over 1000 IRQS it is not practical
- * to expect that they will all nest. We give a max of 10 bits for
- * hardirq nesting. An arch may choose to give less than 10 bits.
- * m68k expects it to be 8.
+ * The hardirq count could in theory be the same as the number of
+ * interrupts in the system, but we run all interrupt handlers with
+ * interrupts disabled, so we cannot have nesting interrupts. Though
+ * there are a few palaeontologic drivers which reenable interrupts in
+ * the handler, so we need more than one bit here.
*
- * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
- * - bit 26 is the NMI_MASK
- * - bit 27 is the PREEMPT_ACTIVE flag
- *
- * PREEMPT_MASK: 0x000000ff
- * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x03ff0000
- * NMI_MASK: 0x04000000
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x000f0000
+ * NMI_MASK: 0x00100000
+ * PREEMPT_ACTIVE: 0x00200000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 4
#define NMI_BITS 1
-#define MAX_HARDIRQ_BITS 10
-
-#ifndef HARDIRQ_BITS
-# define HARDIRQ_BITS MAX_HARDIRQ_BITS
-#endif
-
-#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
-#error HARDIRQ_BITS too high!
-#endif
-
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
@@ -60,15 +47,9 @@
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS 1
#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
-#endif
-
-#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
-#error PREEMPT_ACTIVE is too low!
-#endif
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
diff --git a/include/linux/printk.h b/include/linux/printk.h
index e6131a782481..694925837a16 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -233,6 +233,8 @@ extern asmlinkage void dump_stack(void) __cold;
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
+#include <linux/dynamic_debug.h>
+
/* If you are writing a driver, please use dev_dbg instead */
#if defined(CONFIG_DYNAMIC_DEBUG)
/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
@@ -343,7 +345,19 @@ extern asmlinkage void dump_stack(void) __cold;
#endif
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define pr_debug_ratelimited(fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+ __ratelimit(&_rs)) \
+ __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
#define pr_debug_ratelimited(fmt, ...) \
printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
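Call sites keep the same form; with CONFIG_DYNAMIC_DEBUG the message is now gated on the dynamic-debug descriptor before the ratelimit check, as the comment above notes. For example:

    #include <linux/printk.h>

    static void report_spurious_irq(int irq)
    {
            /* Silent unless this callsite is enabled, and then rate limited */
            pr_debug_ratelimited("spurious interrupt %d\n", irq);
    }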
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 56f4a866539a..2de2e275b2cb 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -6,6 +6,9 @@
#include <linux/backlight.h>
+/* TODO: convert to gpiod_*() API once it has been merged */
+#define PWM_BACKLIGHT_GPIO_ACTIVE_LOW (1 << 0)
+
struct platform_pwm_backlight_data {
int pwm_id;
unsigned int max_brightness;
@@ -13,6 +16,8 @@ struct platform_pwm_backlight_data {
unsigned int lth_brightness;
unsigned int pwm_period_ns;
unsigned int *levels;
+ int enable_gpio;
+ unsigned long enable_gpio_flags;
int (*init)(struct device *dev);
int (*notify)(struct device *dev, int brightness);
void (*notify_after)(struct device *dev, int brightness);
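Usage note (not part of the patch): board code describes the enable line through the two new fields. A sketch with invented GPIO number and timing values:

/* Board-file sketch: all values are made up; the enable line here is an
 * active-low GPIO, flagged with PWM_BACKLIGHT_GPIO_ACTIVE_LOW. */
#include <linux/pwm_backlight.h>

static struct platform_pwm_backlight_data board_backlight_data = {
	.pwm_id			= 0,
	.max_brightness		= 255,
	.lth_brightness		= 10,
	.pwm_period_ns		= 1000000,	/* 1 kHz PWM */
	.enable_gpio		= 42,
	.enable_gpio_flags	= PWM_BACKLIGHT_GPIO_ACTIVE_LOW,
};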
diff --git a/include/linux/quota.h b/include/linux/quota.h
index d13371134c59..cc7494a35429 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -328,6 +328,7 @@ struct quotactl_ops {
int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
int (*set_xstate)(struct super_block *, unsigned int, int);
+ int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
};
struct quota_format_type {
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index ffc444c38b0a..403940787be1 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -231,6 +231,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
int radix_tree_preload(gfp_t gfp_mask);
+int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
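Usage note (not part of the patch): as I read it, radix_tree_maybe_preload() follows the same preload/insert/preload_end pattern as radix_tree_preload(), only skipping the actual preallocation when the gfp mask does not allow it. A sketch of that pattern (the spinlock and function name are hypothetical):

#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cache_lock);	/* hypothetical lock protecting the tree */

static int cache_insert(struct radix_tree_root *root, unsigned long index,
			void *item, gfp_t gfp)
{
	int err;

	err = radix_tree_maybe_preload(gfp);
	if (err)
		return err;

	spin_lock(&cache_lock);
	err = radix_tree_insert(root, index, item);
	spin_unlock(&cache_lock);

	radix_tree_preload_end();	/* re-enables preemption in either case */
	return err;
}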
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 0f424698064f..73069cb6c54a 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -101,6 +101,7 @@ extern const struct raid6_calls raid6_altivec8;
extern const struct raid6_calls raid6_avx2x1;
extern const struct raid6_calls raid6_avx2x2;
extern const struct raid6_calls raid6_avx2x4;
+extern const struct raid6_calls raid6_tilegx8;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 69e37c2d1ea5..753207c8ce20 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -25,7 +25,7 @@ extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
extern const struct file_operations ramfs_file_operations;
extern const struct vm_operations_struct generic_file_vm_ops;
-extern int __init init_rootfs(void);
+extern int __init init_ramfs_fs(void);
int ramfs_fill_super(struct super_block *sb, void *data, int silent);
diff --git a/include/linux/random.h b/include/linux/random.h
index 3b9377d6b7a5..4002b3df4c85 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -17,6 +17,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
+extern int random_int_secret_init(void);
#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
@@ -28,8 +29,13 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
u32 prandom_u32(void);
void prandom_bytes(void *buf, int nbytes);
void prandom_seed(u32 seed);
+void prandom_reseed_late(void);
-u32 prandom_u32_state(struct rnd_state *);
+struct rnd_state {
+ __u32 s1, s2, s3, s4;
+};
+
+u32 prandom_u32_state(struct rnd_state *state);
void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
/*
@@ -49,9 +55,10 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
{
u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
- state->s1 = __seed(i, 1);
- state->s2 = __seed(i, 7);
- state->s3 = __seed(i, 15);
+ state->s1 = __seed(i, 2U);
+ state->s2 = __seed(i, 8U);
+ state->s3 = __seed(i, 16U);
+ state->s4 = __seed(i, 128U);
}
#ifdef CONFIG_ARCH_RANDOM
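Usage note (not part of the patch): struct rnd_state now carries four words and the seeding constants change accordingly, but callers keep the same pattern. A sketch of a reproducible per-caller stream (the seed value is arbitrary and the function name hypothetical):

#include <linux/random.h>

static void fill_test_pattern(void *buf, int len)
{
	struct rnd_state state;

	prandom_seed_state(&state, 0x123456789abcdef0ULL);
	prandom_bytes_state(&state, buf, len);
}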
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index 0022c1bb1e26..57e75ae9910f 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -68,6 +68,10 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
@@ -81,4 +85,24 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
*rb_link = node;
}
+#define rb_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? rb_entry(____ptr, type, member) : NULL; \
+ })
+
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
+ * given type safe against removal of rb_node entry
+ *
+ * @pos: the 'type *' to use as a loop cursor.
+ * @n: another 'type *' to use as temporary storage
+ * @root: 'rb_root *' of the rbtree.
+ * @field: the name of the rb_node field within 'type'.
+ */
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+ for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
+ pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
+ typeof(*pos), field); 1; }); \
+ pos = n)
+
#endif /* _LINUX_RBTREE_H */
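Usage note (not part of the patch): the post-order iterator visits children before their parent, which makes it a natural fit for tearing down a whole tree without rebalancing after each removal. A sketch ('struct foo' is hypothetical):

#include <linux/rbtree.h>
#include <linux/slab.h>

struct foo {
	struct rb_node node;
	int key;
};

static void foo_destroy_tree(struct rb_root *root)
{
	struct foo *pos, *n;

	/* safe: each entry is freed only after its children were visited */
	rbtree_postorder_for_each_entry_safe(pos, n, root, node)
		kfree(pos);

	*root = RB_ROOT;
}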
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4106721c4e5e..45a0a9e81478 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -19,6 +19,21 @@
*/
/*
+ * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
+ * @list: list to be initialized
+ *
+ * You should instead use INIT_LIST_HEAD() for normal initialization and
+ * cleanup tasks, when readers have no access to the list being initialized.
+ * However, if the list being initialized is visible to readers, you
+ * need to keep the compiler from being too mischievous.
+ */
+static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
+{
+ ACCESS_ONCE(list->next) = list;
+ ACCESS_ONCE(list->prev) = list;
+}
+
+/*
* return the ->next pointer of a list_head in an rcu safe
* way, we must not access it directly
*/
@@ -191,9 +206,13 @@ static inline void list_splice_init_rcu(struct list_head *list,
if (list_empty(list))
return;
- /* "first" and "last" tracking list, so initialize it. */
+ /*
+ * "first" and "last" tracking list, so initialize it. RCU readers
+ * have access to this list, so we must use INIT_LIST_HEAD_RCU()
+ * instead of INIT_LIST_HEAD().
+ */
- INIT_LIST_HEAD(list);
+ INIT_LIST_HEAD_RCU(list);
/*
* At this point, the list body still points to the source list.
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f1f1bc39346b..39cbb889e20d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -261,6 +261,10 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
rcu_irq_exit(); \
} while (0)
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
+extern bool __rcu_is_watching(void);
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
+
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -297,10 +301,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
-extern int rcu_is_cpu_idle(void);
-#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
-
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -351,7 +351,7 @@ static inline int rcu_read_lock_held(void)
{
if (!debug_lockdep_rcu_enabled())
return 1;
- if (rcu_is_cpu_idle())
+ if (!rcu_is_watching())
return 0;
if (!rcu_lockdep_current_cpu_online())
return 0;
@@ -402,7 +402,7 @@ static inline int rcu_read_lock_sched_held(void)
if (!debug_lockdep_rcu_enabled())
return 1;
- if (rcu_is_cpu_idle())
+ if (!rcu_is_watching())
return 0;
if (!rcu_lockdep_current_cpu_online())
return 0;
@@ -771,7 +771,7 @@ static inline void rcu_read_lock(void)
__rcu_read_lock();
__acquire(RCU);
rcu_lock_acquire(&rcu_lock_map);
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_lock() used illegally while idle");
}
@@ -792,7 +792,7 @@ static inline void rcu_read_lock(void)
*/
static inline void rcu_read_unlock(void)
{
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map);
__release(RCU);
@@ -821,7 +821,7 @@ static inline void rcu_read_lock_bh(void)
local_bh_disable();
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_lock_bh() used illegally while idle");
}
@@ -832,7 +832,7 @@ static inline void rcu_read_lock_bh(void)
*/
static inline void rcu_read_unlock_bh(void)
{
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
@@ -857,7 +857,7 @@ static inline void rcu_read_lock_sched(void)
preempt_disable();
__acquire(RCU_SCHED);
rcu_lock_acquire(&rcu_sched_lock_map);
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_lock_sched() used illegally while idle");
}
@@ -875,7 +875,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
*/
static inline void rcu_read_unlock_sched(void)
{
- rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock_sched() used illegally while idle");
rcu_lock_release(&rcu_sched_lock_map);
__release(RCU_SCHED);
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e31005ee339e..09ebcbe9fd78 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -132,4 +132,21 @@ static inline void rcu_scheduler_starting(void)
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
+
+static inline bool rcu_is_watching(void)
+{
+ return __rcu_is_watching();
+}
+
+#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
+static inline bool rcu_is_watching(void)
+{
+ return true;
+}
+
+
+#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 226169d1bd2b..4b9c81548742 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -90,4 +90,6 @@ extern void exit_rcu(void);
extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
+extern bool rcu_is_watching(void);
+
#endif /* __LINUX_RCUTREE_H */
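Usage note (not part of the patch): rcu_is_watching() replaces the inverted rcu_is_cpu_idle() test; it is primarily a debugging aid, as in the assertions above. A minimal sketch of such a check (the helper name is invented):

#include <linux/bug.h>
#include <linux/rcupdate.h>

static void assert_rcu_usable(void)
{
	/* fires if called from a context RCU is not watching, e.g. the idle loop */
	WARN_ONCE(!rcu_is_watching(),
		  "RCU used while RCU is not watching this CPU");
}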
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index a10380bfbeac..e55907804d39 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -23,6 +23,7 @@ struct device;
struct i2c_client;
struct irq_domain;
struct spi_device;
+struct spmi_device;
struct regmap;
struct regmap_range_cfg;
struct regmap_field;
@@ -70,6 +71,8 @@ struct regmap_range {
unsigned int range_max;
};
+#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, }
+
/*
* A table of ranges including some yes ranges and some no ranges.
* If a register belongs to a no_range, the corresponding check function
@@ -318,6 +321,8 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config);
struct regmap *regmap_init_spi(struct spi_device *dev,
const struct regmap_config *config);
+struct regmap *regmap_init_spmi(struct spmi_device *dev,
+ const struct regmap_config *config);
struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
void __iomem *regs,
const struct regmap_config *config);
@@ -330,6 +335,8 @@ struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config);
struct regmap *devm_regmap_init_spi(struct spi_device *dev,
const struct regmap_config *config);
+struct regmap *devm_regmap_init_spmi(struct spmi_device *dev,
+ const struct regmap_config *config);
struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
void __iomem *regs,
const struct regmap_config *config);
@@ -374,10 +381,13 @@ int regmap_reinit_cache(struct regmap *map,
const struct regmap_config *config);
struct regmap *dev_get_regmap(struct device *dev, const char *name);
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
size_t val_count);
+int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs,
+ int num_regs);
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
@@ -387,9 +397,14 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
+int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val);
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change);
+int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change);
int regmap_get_val_bytes(struct regmap *map);
int regmap_async_complete(struct regmap *map);
bool regmap_can_raw_write(struct regmap *map);
@@ -425,11 +440,15 @@ bool regmap_reg_in_ranges(unsigned int reg,
* @reg: Offset of the register within the regmap bank
* @lsb: lsb of the register field.
 * @msb: msb of the register field.
+ * @id_size: number of ports, if the field is repeated across several ports
+ * @id_offset: address offset between consecutive ports
*/
struct reg_field {
unsigned int reg;
unsigned int lsb;
unsigned int msb;
+ unsigned int id_size;
+ unsigned int id_offset;
};
#define REG_FIELD(_reg, _lsb, _msb) { \
@@ -448,6 +467,15 @@ void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
int regmap_field_read(struct regmap_field *field, unsigned int *val);
int regmap_field_write(struct regmap_field *field, unsigned int val);
+int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val);
+
+int regmap_fields_write(struct regmap_field *field, unsigned int id,
+ unsigned int val);
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+ unsigned int *val);
+int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val);
/**
* Description of an IRQ for the generic regmap irq_chip.
@@ -527,6 +555,13 @@ static inline int regmap_write(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_write_async(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
@@ -576,6 +611,14 @@ static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_update_bits_async(struct regmap *map,
+ unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_update_bits_check(struct regmap *map,
unsigned int reg,
unsigned int mask, unsigned int val,
@@ -585,6 +628,16 @@ static inline int regmap_update_bits_check(struct regmap *map,
return -EINVAL;
}
+static inline int regmap_update_bits_check_async(struct regmap *map,
+ unsigned int reg,
+ unsigned int mask,
+ unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_get_val_bytes(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
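Usage note (not part of the patch): regmap_reg_range() trims the boilerplate out of access tables, and the new id_size/id_offset members describe a field that repeats across several identically laid-out ports. A sketch with invented register numbers; once a regmap_field has been allocated from such a description, the per-port copies are accessed with regmap_fields_read()/regmap_fields_write():

#include <linux/kernel.h>
#include <linux/regmap.h>

/* registers 0x00-0x5f are writeable */
static const struct regmap_range mydev_wr_ranges[] = {
	regmap_reg_range(0x00, 0x5f),
};

static const struct regmap_access_table mydev_wr_table = {
	.yes_ranges	= mydev_wr_ranges,
	.n_yes_ranges	= ARRAY_SIZE(mydev_wr_ranges),
};

static const struct regmap_config mydev_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x5f,
	.wr_table	= &mydev_wr_table,
};

/* a 4-bit enable field repeated across 4 ports, 0x10 apart */
static const struct reg_field mydev_port_en = {
	.reg		= 0x20,
	.lsb		= 0,
	.msb		= 3,
	.id_size	= 4,
	.id_offset	= 0x10,
};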
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 27be915caa96..e530681bea70 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -146,6 +146,32 @@ struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
+int regulator_register_supply_alias(struct device *dev, const char *id,
+ struct device *alias_dev,
+ const char *alias_id);
+void regulator_unregister_supply_alias(struct device *dev, const char *id);
+
+int regulator_bulk_register_supply_alias(struct device *dev, const char **id,
+ struct device *alias_dev,
+ const char **alias_id, int num_id);
+void regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char **id, int num_id);
+
+int devm_regulator_register_supply_alias(struct device *dev, const char *id,
+ struct device *alias_dev,
+ const char *alias_id);
+void devm_regulator_unregister_supply_alias(struct device *dev,
+ const char *id);
+
+int devm_regulator_bulk_register_supply_alias(struct device *dev,
+ const char **id,
+ struct device *alias_dev,
+ const char **alias_id,
+ int num_id);
+void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char **id,
+ int num_id);
+
/* regulator output control and status */
int __must_check regulator_enable(struct regulator *regulator);
int regulator_disable(struct regulator *regulator);
@@ -250,6 +276,59 @@ static inline void devm_regulator_put(struct regulator *regulator)
{
}
+static inline int regulator_register_supply_alias(struct device *dev,
+ const char *id,
+ struct device *alias_dev,
+ const char *alias_id)
+{
+ return 0;
+}
+
+static inline void regulator_unregister_supply_alias(struct device *dev,
+ const char *id)
+{
+}
+
+static inline int regulator_bulk_register_supply_alias(struct device *dev,
+ const char **id,
+ struct device *alias_dev,
+ const char **alias_id,
+ int num_id)
+{
+ return 0;
+}
+
+static inline void regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char **id,
+ int num_id)
+{
+}
+
+static inline int devm_regulator_register_supply_alias(struct device *dev,
+ const char *id,
+ struct device *alias_dev,
+ const char *alias_id)
+{
+ return 0;
+}
+
+static inline void devm_regulator_unregister_supply_alias(struct device *dev,
+ const char *id)
+{
+}
+
+static inline int devm_regulator_bulk_register_supply_alias(
+ struct device *dev, const char **id, struct device *alias_dev,
+ const char **alias_id, int num_id)
+{
+ return 0;
+}
+
+static inline void devm_regulator_bulk_unregister_supply_alias(
+ struct device *dev, const char **id, int num_id)
+{
+}
+
static inline int regulator_enable(struct regulator *regulator)
{
return 0;
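Usage note (not part of the patch): as I read the new alias API, it redirects a consumer's supply lookup to another device/supply name, which helps when a shared rail is only described once. A probe-time sketch (the device relationship and supply names are invented):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int child_probe(struct device *dev)
{
	struct regulator *vdd;
	int ret;

	/* requests for "vdd" on this device are satisfied by "vsys" on the parent */
	ret = devm_regulator_register_supply_alias(dev, "vdd",
						   dev->parent, "vsys");
	if (ret)
		return ret;

	vdd = devm_regulator_get(dev, "vdd");
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	return regulator_enable(vdd);
}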
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 67e13aa5a478..9370e65348a4 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -40,23 +40,32 @@ enum regulator_status {
};
/**
+ * struct regulator_linear_range - specify linear voltage ranges
+ *
 * Specify a range of voltages for regulator_map_linear_range() and
* regulator_list_linear_range().
*
* @min_uV: Lowest voltage in range
- * @max_uV: Highest voltage in range
* @min_sel: Lowest selector for range
* @max_sel: Highest selector for range
* @uV_step: Step size
*/
struct regulator_linear_range {
unsigned int min_uV;
- unsigned int max_uV;
unsigned int min_sel;
unsigned int max_sel;
unsigned int uV_step;
};
+/* Initialize struct regulator_linear_range */
+#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
+{ \
+ .min_uV = _min_uV, \
+ .min_sel = _min_sel, \
+ .max_sel = _max_sel, \
+ .uV_step = _step_uV, \
+}
+
/**
* struct regulator_ops - regulator operations.
*
@@ -207,6 +216,7 @@ enum regulator_type {
* @min_uV: Voltage given by the lowest selector (if linear mapping)
* @uV_step: Voltage increase with each selector (if linear mapping)
* @linear_min_sel: Minimal selector for starting linear mapping
+ * @fixed_uV: Fixed voltage of rails.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
* @volt_table: Voltage mapping table (if table based mapping)
*
@@ -239,6 +249,7 @@ struct regulator_desc {
unsigned int min_uV;
unsigned int uV_step;
unsigned int linear_min_sel;
+ int fixed_uV;
unsigned int ramp_delay;
const struct regulator_linear_range *linear_ranges;
@@ -334,7 +345,12 @@ struct regulator_dev {
struct regulator_dev *
regulator_register(const struct regulator_desc *regulator_desc,
const struct regulator_config *config);
+struct regulator_dev *
+devm_regulator_register(struct device *dev,
+ const struct regulator_desc *regulator_desc,
+ const struct regulator_config *config);
void regulator_unregister(struct regulator_dev *rdev);
+void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
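Usage note (not part of the patch): with max_uV gone, a range is fully described by its lowest voltage, its selector span and the step size, which is exactly what REGULATOR_LINEAR_RANGE() takes. A sketch with invented numbers:

#include <linux/regulator/driver.h>

static const struct regulator_linear_range myldo_ranges[] = {
	/* selectors 0..7:  800 mV .. 1500 mV in 100 mV steps */
	REGULATOR_LINEAR_RANGE(800000, 0, 7, 100000),
	/* selectors 8..15: 1600 mV .. 3000 mV in 200 mV steps */
	REGULATOR_LINEAR_RANGE(1600000, 8, 15, 200000),
};

A driver would point its regulator_desc at such a table and register it with the new devm_regulator_register(), dropping the explicit unregister call from its remove path.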
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 999b20ce06cf..730e638c5589 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -95,6 +95,7 @@ struct regulator_state {
* @initial_state: Suspend state to set by default.
* @initial_mode: Mode to set at startup.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @enable_time: Turn-on time of the rails (unit: microseconds)
*/
struct regulation_constraints {
@@ -129,6 +130,7 @@ struct regulation_constraints {
unsigned int initial_mode;
unsigned int ramp_delay;
+ unsigned int enable_time;
/* constraint flags */
unsigned always_on:1; /* regulator never off when system is on */
@@ -193,15 +195,10 @@ int regulator_suspend_finish(void);
#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
-void regulator_use_dummy_regulator(void);
#else
static inline void regulator_has_full_constraints(void)
{
}
-
-static inline void regulator_use_dummy_regulator(void)
-{
-}
#endif
#endif
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 96a509b6be04..201a69749659 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -54,7 +54,7 @@ struct res_counter {
struct res_counter *parent;
};
-#define RESOURCE_MAX (unsigned long long)LLONG_MAX
+#define RES_COUNTER_MAX ULLONG_MAX
/**
* Helpers to interact with userspace
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index f28544b2f9af..939428ad25ac 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -15,7 +15,7 @@ extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
u32 id, long expires, u32 error);
-extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
/* RTNL is used as a global lock for all changes to network configuration */
extern void rtnl_lock(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ce1e1c0aaa33..768b037dfacb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -22,6 +22,7 @@ struct sched_param {
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
+#include <linux/preempt_mask.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@@ -285,6 +286,14 @@ static inline void lockup_detector_init(void)
}
#endif
+#ifdef CONFIG_DETECT_HUNG_TASK
+void reset_hung_task_detector(void);
+#else
+static inline void reset_hung_task_detector(void)
+{
+}
+#endif
+
/* Attach to any functions which should be ignored in wchan output. */
#define __sched __attribute__((__section__(".sched.text")))
@@ -322,6 +331,10 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);
+#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
+#define SUID_DUMP_USER 1 /* Dump as user of process */
+#define SUID_DUMP_ROOT 2 /* Dump as root */
+
/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE 0 /* core dump is permitted */
@@ -427,6 +440,14 @@ struct task_cputime {
.sum_exec_runtime = 0, \
}
+#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
+
+#ifdef CONFIG_PREEMPT_COUNT
+#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
+#else
+#define PREEMPT_DISABLED PREEMPT_ENABLED
+#endif
+
/*
* Disable preemption until the scheduler is running.
* Reset by start_kernel()->sched_init()->init_idle().
@@ -434,7 +455,7 @@ struct task_cputime {
* We include PREEMPT_ACTIVE to avoid cond_resched() from working
* before the scheduler is active -- see should_resched().
*/
-#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)
+#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE)
/**
* struct thread_group_cputimer - thread group interval timer counts
@@ -768,6 +789,7 @@ enum cpu_idle_type {
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
+#define SD_NUMA 0x4000 /* cross-node balancing */
extern int __weak arch_sd_sibiling_asym_packing(void);
@@ -809,7 +831,9 @@ struct sched_domain {
unsigned int balance_interval; /* initialise to 1. units in ms. */
unsigned int nr_balance_failed; /* initialise to 0 */
- u64 last_update;
+ /* idle_balance() stats */
+ u64 max_newidle_lb_cost;
+ unsigned long next_decay_max_lb_cost;
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
@@ -1029,6 +1053,8 @@ struct task_struct {
struct task_struct *last_wakee;
unsigned long wakee_flips;
unsigned long wakee_flip_decay_ts;
+
+ int wake_cpu;
#endif
int on_rq;
@@ -1046,15 +1072,6 @@ struct task_struct {
struct hlist_head preempt_notifiers;
#endif
- /*
- * fpu_counter contains the number of consecutive context switches
- * that the FPU is used. If this is over a threshold, the lazy fpu
- * saving becomes unlazy to save the trap. This is an unsigned char
- * so that after 256 times the counter wraps and the behavior turns
- * lazy again; this to deal with bursty apps that only use FPU for
- * a short time
- */
- unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
@@ -1324,10 +1341,41 @@ struct task_struct {
#endif
#ifdef CONFIG_NUMA_BALANCING
int numa_scan_seq;
- int numa_migrate_seq;
unsigned int numa_scan_period;
+ unsigned int numa_scan_period_max;
+ int numa_preferred_nid;
+ int numa_migrate_deferred;
+ unsigned long numa_migrate_retry;
u64 node_stamp; /* migration stamp */
struct callback_head numa_work;
+
+ struct list_head numa_entry;
+ struct numa_group *numa_group;
+
+ /*
+ * Exponential decaying average of faults on a per-node basis.
+ * Scheduling placement decisions are made based on these counts.
+ * The values remain static for the duration of a PTE scan.
+ */
+ unsigned long *numa_faults;
+ unsigned long total_numa_faults;
+
+ /*
+ * numa_faults_buffer records faults per node during the current
+ * scan window. When the scan completes, the counts in numa_faults
+ * decay and these values are copied.
+ */
+ unsigned long *numa_faults_buffer;
+
+ /*
+ * numa_faults_locality tracks if faults recorded during the last
+ * scan window were remote/local. The task scan period is adapted
+ * based on the locality of the faults with different weights
+ * depending on whether they were shared or private faults
+ */
+ unsigned long numa_faults_locality[2];
+
+ unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
struct rcu_head rcu;
@@ -1393,6 +1441,12 @@ struct task_struct {
unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
} memcg_batch;
unsigned int memcg_kmem_skip_account;
+ struct memcg_oom_info {
+ struct mem_cgroup *memcg;
+ gfp_t gfp_mask;
+ int order;
+ unsigned int may_oom:1;
+ } memcg_oom;
#endif
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
@@ -1406,16 +1460,33 @@ struct task_struct {
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+#define TNF_MIGRATED 0x01
+#define TNF_NO_GROUP 0x02
+#define TNF_SHARED 0x04
+#define TNF_FAULT_LOCAL 0x08
+
#ifdef CONFIG_NUMA_BALANCING
-extern void task_numa_fault(int node, int pages, bool migrated);
+extern void task_numa_fault(int last_node, int node, int pages, int flags);
+extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
+extern void task_numa_free(struct task_struct *p);
+
+extern unsigned int sysctl_numa_balancing_migrate_deferred;
#else
-static inline void task_numa_fault(int node, int pages, bool migrated)
+static inline void task_numa_fault(int last_node, int node, int pages,
+ int flags)
{
}
+static inline pid_t task_numa_group_id(struct task_struct *p)
+{
+ return 0;
+}
static inline void set_numabalancing_state(bool enabled)
{
}
+static inline void task_numa_free(struct task_struct *p)
+{
+}
#endif
static inline struct pid *task_pid(struct task_struct *task)
@@ -1968,7 +2039,7 @@ extern void wake_up_new_task(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
-extern void sched_fork(struct task_struct *p);
+extern void sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
@@ -2169,15 +2240,15 @@ static inline bool thread_group_leader(struct task_struct *p)
* all we care about is that we have a task with the appropriate
* pid, we don't actually care if we have the right task.
*/
-static inline int has_group_leader_pid(struct task_struct *p)
+static inline bool has_group_leader_pid(struct task_struct *p)
{
- return p->pid == p->tgid;
+ return task_pid(p) == p->signal->leader_pid;
}
static inline
-int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
- return p1->tgid == p2->tgid;
+ return p1->signal == p2->signal;
}
static inline struct task_struct *next_thread(const struct task_struct *p)
@@ -2395,11 +2466,6 @@ static inline int signal_pending_state(long state, struct task_struct *p)
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
-static inline int need_resched(void)
-{
- return unlikely(test_thread_flag(TIF_NEED_RESCHED));
-}
-
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
@@ -2468,36 +2534,105 @@ static inline int tsk_is_polling(struct task_struct *p)
{
return task_thread_info(p)->status & TS_POLLING;
}
-static inline void current_set_polling(void)
+static inline void __current_set_polling(void)
{
current_thread_info()->status |= TS_POLLING;
}
-static inline void current_clr_polling(void)
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ __current_set_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_task()
+ */
+ smp_mb();
+
+ return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
{
current_thread_info()->status &= ~TS_POLLING;
- smp_mb__after_clear_bit();
+}
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_task()
+ */
+ smp_mb();
+
+ return unlikely(tif_need_resched());
}
#elif defined(TIF_POLLING_NRFLAG)
static inline int tsk_is_polling(struct task_struct *p)
{
return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}
-static inline void current_set_polling(void)
+
+static inline void __current_set_polling(void)
{
set_thread_flag(TIF_POLLING_NRFLAG);
}
-static inline void current_clr_polling(void)
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ __current_set_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_task()
+ *
+ * XXX: assumes set/clear bit are identical barrier wise.
+ */
+ smp_mb__after_clear_bit();
+
+ return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
{
clear_thread_flag(TIF_POLLING_NRFLAG);
}
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_task()
+ */
+ smp_mb__after_clear_bit();
+
+ return unlikely(tif_need_resched());
+}
+
#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
-static inline void current_set_polling(void) { }
-static inline void current_clr_polling(void) { }
+static inline void __current_set_polling(void) { }
+static inline void __current_clr_polling(void) { }
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ return unlikely(tif_need_resched());
+}
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ return unlikely(tif_need_resched());
+}
#endif
+static __always_inline bool need_resched(void)
+{
+ return unlikely(tif_need_resched());
+}
+
/*
* Thread group CPU time accounting.
*/
@@ -2539,6 +2674,11 @@ static inline unsigned int task_cpu(const struct task_struct *p)
return task_thread_info(p)->cpu;
}
+static inline int task_node(const struct task_struct *p)
+{
+ return cpu_to_node(task_cpu(p));
+}
+
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else
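Usage note (not part of the patch): the *_and_test() helpers exist so an idle path can advertise polling and re-check TIF_NEED_RESCHED in one step, closing the race with a remote wakeup. A deliberately simplified sketch, not the actual idle loop:

#include <linux/sched.h>

static void idle_poll_sketch(void)
{
	/* set polling, full barrier, then re-check NEED_RESCHED */
	if (current_set_polling_and_test()) {
		/* a reschedule is already pending: skip the low-power wait */
		__current_clr_polling();
		return;
	}

	/* ... architecture-specific wait that exits on TIF_NEED_RESCHED ... */

	__current_clr_polling();
}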
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index bf8086b2506e..41467f8ff8ec 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -2,8 +2,8 @@
#define _SCHED_SYSCTL_H
#ifdef CONFIG_DETECT_HUNG_TASK
+extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
-extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
@@ -47,7 +47,6 @@ extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_period_reset;
extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_numa_balancing_settle_count;
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index fa7922c80a41..cddf0c2940b6 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -15,7 +15,7 @@ static inline void sched_clock_postinit(void) { }
#endif
extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
-
-extern unsigned long long (*sched_clock_func)(void);
+extern void sched_clock_register(u64 (*read)(void), int bits,
+ unsigned long rate);
#endif
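Usage note (not part of the patch): clocksource setup code moves from setup_sched_clock()'s 32-bit read callback to the new 64-bit sched_clock_register(). A sketch (the 56-bit width, 32.768 kHz rate and my_timer_read() are invented):

#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/sched_clock.h>
#include <linux/types.h>

extern u64 my_timer_read(void);	/* hypothetical counter register read */

static u64 notrace my_sched_clock_read(void)
{
	return my_timer_read();
}

static void __init my_timer_init(void)
{
	sched_clock_register(my_sched_clock_read, 56, 32768);
}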
diff --git a/include/linux/security.h b/include/linux/security.h
index 7ce53ae1266b..5623a7f965b7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1052,17 +1052,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @xfrm_policy_delete_security:
* @ctx contains the xfrm_sec_ctx.
* Authorize deletion of xp->security.
- * @xfrm_state_alloc_security:
+ * @xfrm_state_alloc:
* @x contains the xfrm_state being added to the Security Association
* Database by the XFRM system.
* @sec_ctx contains the security context information being provided by
* the user-level SA generation program (e.g., setkey or racoon).
- * @secid contains the secid from which to take the mls portion of the context.
* Allocate a security structure to the x->security field; the security
* field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to either sec_ctx or polsec, with the mls portion
- * taken from secid in the latter case.
- * Return 0 if operation was successful (memory to allocate, legal context).
+ * context to correspond to sec_ctx. Return 0 if operation was successful
+ * (memory to allocate, legal context).
+ * @xfrm_state_alloc_acquire:
+ * @x contains the xfrm_state being added to the Security Association
+ * Database by the XFRM system.
+ * @polsec contains the policy's security context.
+ * @secid contains the secid from which to take the mls portion of the
+ * context.
+ * Allocate a security structure to the x->security field; the security
+ * field is initialized to NULL when the xfrm_state is allocated. Set the
+ * context to correspond to secid. Return 0 if operation was successful
+ * (memory to allocate, legal context).
* @xfrm_state_free_security:
* @x contains the xfrm_state.
* Deallocate x->security.
@@ -1492,7 +1500,7 @@ struct security_operations {
int (*inode_alloc_security) (struct inode *inode);
void (*inode_free_security) (struct inode *inode);
int (*inode_init_security) (struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
+ const struct qstr *qstr, const char **name,
void **value, size_t *len);
int (*inode_create) (struct inode *dir,
struct dentry *dentry, umode_t mode);
@@ -1679,9 +1687,11 @@ struct security_operations {
int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
- int (*xfrm_state_alloc_security) (struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx,
- u32 secid);
+ int (*xfrm_state_alloc) (struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx);
+ int (*xfrm_state_alloc_acquire) (struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec,
+ u32 secid);
void (*xfrm_state_free_security) (struct xfrm_state *x);
int (*xfrm_state_delete_security) (struct xfrm_state *x);
int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
@@ -1770,7 +1780,7 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
initxattrs initxattrs, void *fs_data);
int security_old_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
+ const struct qstr *qstr, const char **name,
void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
@@ -2094,8 +2104,8 @@ static inline int security_inode_init_security(struct inode *inode,
static inline int security_old_inode_init_security(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
- char **name, void **value,
- size_t *len)
+ const char **name,
+ void **value, size_t *len)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 4e32edc8f506..52e0097f61f0 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -20,6 +20,7 @@ struct seq_file {
size_t size;
size_t from;
size_t count;
+ size_t pad_until;
loff_t index;
loff_t read_pos;
u64 version;
@@ -79,6 +80,20 @@ static inline void seq_commit(struct seq_file *m, int num)
}
}
+/**
+ * seq_setwidth - set padding width
+ * @m: the seq_file handle
+ * @size: the max number of bytes to pad.
+ *
+ * Call seq_setwidth() for setting max width, then call seq_printf() etc. and
+ * finally call seq_pad() to pad the remaining bytes.
+ */
+static inline void seq_setwidth(struct seq_file *m, size_t size)
+{
+ m->pad_until = m->count + size;
+}
+void seq_pad(struct seq_file *m, char c);
+
char *mangle_path(char *s, const char *p, const char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
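Usage note (not part of the patch): the pad_until bookkeeping lets a show() routine emit a variable-width field and then fill the column to a fixed width. A sketch following the seq_setwidth()/seq_pad() recipe from the comment ('struct foo' is hypothetical):

#include <linux/seq_file.h>

struct foo {
	const char *name;
	int count;
};

static int foo_seq_show(struct seq_file *m, void *v)
{
	struct foo *f = v;

	seq_setwidth(m, 20);		/* pad the name field to 20 bytes */
	seq_printf(m, "%s", f->name);
	seq_pad(m, ' ');
	seq_printf(m, "%d\n", f->count);
	return 0;
}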
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 18299057402f..cf87a24c0f92 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -3,15 +3,21 @@
/*
* Reader/writer consistent mechanism without starving writers. This type of
* lock for data where the reader wants a consistent set of information
- * and is willing to retry if the information changes. Readers never
- * block but they may have to retry if a writer is in
- * progress. Writers do not wait for readers.
+ * and is willing to retry if the information changes. There are two types
+ * of readers:
+ * 1. Sequence readers which never block a writer but they may have to retry
+ * if a writer is in progress by detecting change in sequence number.
+ * Writers do not wait for a sequence reader.
+ * 2. Locking readers which will wait if a writer or another locking reader
+ * is in progress. A locking reader in progress will also block a writer
+ * from going forward. Unlike the regular rwlock, the read lock here is
+ * exclusive so that only one locking reader can get it.
*
- * This is not as cache friendly as brlock. Also, this will not work
+ * This is not as cache friendly as brlock. Also, this may not work well
* for data that contains pointers, because any writer could
* invalidate a pointer that a reader was following.
*
- * Expected reader usage:
+ * Expected non-blocking reader usage:
* do {
* seq = read_seqbegin(&foo);
* ...
@@ -28,6 +34,7 @@
#include <linux/spinlock.h>
#include <linux/preempt.h>
+#include <linux/lockdep.h>
#include <asm/processor.h>
/*
@@ -38,10 +45,50 @@
*/
typedef struct seqcount {
unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
} seqcount_t;
-#define SEQCNT_ZERO { 0 }
-#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
+static inline void __seqcount_init(seqcount_t *s, const char *name,
+ struct lock_class_key *key)
+{
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ lockdep_init_map(&s->dep_map, name, key, 0);
+ s->sequence = 0;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+ .dep_map = { .name = #lockname } \
+
+# define seqcount_init(s) \
+ do { \
+ static struct lock_class_key __key; \
+ __seqcount_init((s), #s, &__key); \
+ } while (0)
+
+static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
+{
+ seqcount_t *l = (seqcount_t *)s;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
+ seqcount_release(&l->dep_map, 1, _RET_IP_);
+ local_irq_restore(flags);
+}
+
+#else
+# define SEQCOUNT_DEP_MAP_INIT(lockname)
+# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
+# define seqcount_lockdep_reader_access(x)
+#endif
+
+#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
+
/**
* __read_seqcount_begin - begin a seq-read critical section (without barrier)
@@ -70,6 +117,22 @@ repeat:
}
/**
+ * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * read_seqcount_begin_no_lockdep opens a read critical section of the given
+ * seqcount, but without any lockdep checking. Validity of the critical
+ * section is tested by calling the read_seqcount_retry() function.
+ */
+static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
+{
+ unsigned ret = __read_seqcount_begin(s);
+ smp_rmb();
+ return ret;
+}
+
+/**
* read_seqcount_begin - begin a seq-read critical section
* @s: pointer to seqcount_t
* Returns: count to be passed to read_seqcount_retry
@@ -80,9 +143,8 @@ repeat:
*/
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
- unsigned ret = __read_seqcount_begin(s);
- smp_rmb();
- return ret;
+ seqcount_lockdep_reader_access(s);
+ return read_seqcount_begin_no_lockdep(s);
}
/**
@@ -102,6 +164,8 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = ACCESS_ONCE(s->sequence);
+
+ seqcount_lockdep_reader_access(s);
smp_rmb();
return ret & ~1;
}
@@ -146,14 +210,21 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
-static inline void write_seqcount_begin(seqcount_t *s)
+static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
s->sequence++;
smp_wmb();
+ seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+}
+
+static inline void write_seqcount_begin(seqcount_t *s)
+{
+ write_seqcount_begin_nested(s, 0);
}
static inline void write_seqcount_end(seqcount_t *s)
{
+ seqcount_release(&s->dep_map, 1, _RET_IP_);
smp_wmb();
s->sequence++;
}
@@ -182,7 +253,7 @@ typedef struct {
*/
#define __SEQLOCK_UNLOCKED(lockname) \
{ \
- .seqcount = SEQCNT_ZERO, \
+ .seqcount = SEQCNT_ZERO(lockname), \
.lock = __SPIN_LOCK_UNLOCKED(lockname) \
}
@@ -268,4 +339,85 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
spin_unlock_irqrestore(&sl->lock, flags);
}
+/*
+ * A locking reader exclusively locks out other writers and locking readers,
+ * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
+ */
+static inline void read_seqlock_excl(seqlock_t *sl)
+{
+ spin_lock(&sl->lock);
+}
+
+static inline void read_sequnlock_excl(seqlock_t *sl)
+{
+ spin_unlock(&sl->lock);
+}
+
+/**
+ * read_seqbegin_or_lock - begin a sequence number check or locking block
+ * @lock: sequence lock
+ * @seq : sequence number to be checked
+ *
+ * First try it once optimistically without taking the lock. If that fails,
+ * take the lock. The sequence number is also used as a marker for deciding
+ * whether to be a reader (even) or writer (odd).
+ * N.B. seq must be initialized to an even number to begin with.
+ */
+static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+{
+ if (!(*seq & 1)) /* Even */
+ *seq = read_seqbegin(lock);
+ else /* Odd */
+ read_seqlock_excl(lock);
+}
+
+static inline int need_seqretry(seqlock_t *lock, int seq)
+{
+ return !(seq & 1) && read_seqretry(lock, seq);
+}
+
+static inline void done_seqretry(seqlock_t *lock, int seq)
+{
+ if (seq & 1)
+ read_sequnlock_excl(lock);
+}
+
+static inline void read_seqlock_excl_bh(seqlock_t *sl)
+{
+ spin_lock_bh(&sl->lock);
+}
+
+static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+{
+ spin_unlock_bh(&sl->lock);
+}
+
+static inline void read_seqlock_excl_irq(seqlock_t *sl)
+{
+ spin_lock_irq(&sl->lock);
+}
+
+static inline void read_sequnlock_excl_irq(seqlock_t *sl)
+{
+ spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sl->lock, flags);
+ return flags;
+}
+
+#define read_seqlock_excl_irqsave(lock, flags) \
+ do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
+
+static inline void
+read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+ spin_unlock_irqrestore(&sl->lock, flags);
+}
+
#endif /* __LINUX_SEQLOCK_H */
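Usage note (not part of the patch): the read_seqbegin_or_lock()/need_seqretry()/done_seqretry() trio implements "try lockless first, fall back to the exclusive reader lock". A sketch of the canonical loop (the two integers being read stand in for real shared state):

#include <linux/seqlock.h>

static int read_pair(seqlock_t *lock, const int *a, const int *b)
{
	int seq = 0;	/* must start even: the first pass is lockless */
	int x, y;

	do {
		read_seqbegin_or_lock(lock, &seq);
		x = *a;
		y = *b;
		if (!need_seqretry(lock, seq))
			break;
		seq = 1;	/* odd: retry as an exclusive locking reader */
	} while (1);
	done_seqretry(lock, seq);

	return x + y;
}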
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index b98291ac7f14..f729be981da0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -66,7 +66,6 @@ struct uart_ops {
void (*set_ldisc)(struct uart_port *, int new);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
- int (*set_wake)(struct uart_port *, unsigned int state);
/*
* Return a string describing the type of the port
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index d34049712a4d..50fe651da965 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -5,7 +5,7 @@
#include <linux/sh_dma.h>
/*
- * Generic header for SuperH (H)SCI(F) (used by sh/sh64/h8300 and related parts)
+ * Generic header for SuperH (H)SCI(F) (used by sh/sh64 and related parts)
*/
#define SCIx_NOT_SUPPORTED (-1)
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
index fe817918b30e..d9b436f09925 100644
--- a/include/linux/sfi.h
+++ b/include/linux/sfi.h
@@ -59,6 +59,9 @@
#ifndef _LINUX_SFI_H
#define _LINUX_SFI_H
+#include <linux/init.h>
+#include <linux/types.h>
+
/* Table signatures reserved by the SFI specification */
#define SFI_SIG_SYST "SYST"
#define SFI_SIG_FREQ "FREQ"
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h
index 4e83f3e034f3..b7b43b82231e 100644
--- a/include/linux/sh_dma.h
+++ b/include/linux/sh_dma.h
@@ -33,13 +33,44 @@ struct sh_dmae_slave_config {
char mid_rid;
};
+/**
+ * struct sh_dmae_channel - DMAC channel platform data
+ * @offset: register offset within the main IOMEM resource
+ * @dmars: channel DMARS register offset
+ * @chclr_offset: channel CHCLR register offset
+ * @dmars_bit: channel DMARS field offset within the register
+ * @chclr_bit: bit position, to be set to reset the channel
+ */
struct sh_dmae_channel {
unsigned int offset;
unsigned int dmars;
- unsigned int dmars_bit;
unsigned int chclr_offset;
+ unsigned char dmars_bit;
+ unsigned char chclr_bit;
};
+/**
+ * struct sh_dmae_pdata - DMAC platform data
+ * @slave: array of slaves
+ * @slave_num: number of slaves in the above array
+ * @channel: array of DMA channels
+ * @channel_num: number of channels in the above array
+ * @ts_low_shift: shift of the low part of the TS field
+ * @ts_low_mask: low TS field mask
+ * @ts_high_shift: additional shift of the high part of the TS field
+ * @ts_high_mask: high TS field mask
+ * @ts_shift: array of Transfer Size shifts, indexed by TS value
+ * @ts_shift_num: number of shifts in the above array
+ * @dmaor_init: DMAOR initialisation value
+ * @chcr_offset: CHCR address offset
+ * @chcr_ie_bit: CHCR Interrupt Enable bit
+ * @dmaor_is_32bit: DMAOR is a 32-bit register
+ * @needs_tend_set: the TEND register has to be set
+ * @no_dmars: DMAC has no DMARS registers
+ * @chclr_present: DMAC has one or several CHCLR registers
+ * @chclr_bitwise: channel CHCLR registers are bitwise
+ * @slave_only: DMAC cannot be used for MEMCPY
+ */
struct sh_dmae_pdata {
const struct sh_dmae_slave_config *slave;
int slave_num;
@@ -59,42 +90,22 @@ struct sh_dmae_pdata {
unsigned int needs_tend_set:1;
unsigned int no_dmars:1;
unsigned int chclr_present:1;
+ unsigned int chclr_bitwise:1;
unsigned int slave_only:1;
};
-/* DMA register */
-#define SAR 0x00
-#define DAR 0x04
-#define TCR 0x08
-#define CHCR 0x0C
-#define DMAOR 0x40
-
-#define TEND 0x18 /* USB-DMAC */
-
/* DMAOR definitions */
#define DMAOR_AE 0x00000004
#define DMAOR_NMIF 0x00000002
#define DMAOR_DME 0x00000001
/* Definitions for the SuperH DMAC */
-#define REQ_L 0x00000000
-#define REQ_E 0x00080000
-#define RACK_H 0x00000000
-#define RACK_L 0x00040000
-#define ACK_R 0x00000000
-#define ACK_W 0x00020000
-#define ACK_H 0x00000000
-#define ACK_L 0x00010000
#define DM_INC 0x00004000
#define DM_DEC 0x00008000
#define DM_FIX 0x0000c000
#define SM_INC 0x00001000
#define SM_DEC 0x00002000
#define SM_FIX 0x00003000
-#define RS_IN 0x00000200
-#define RS_OUT 0x00000300
-#define TS_BLK 0x00000040
-#define TM_BUR 0x00000020
#define CHCR_DE 0x00000001
#define CHCR_TE 0x00000002
#define CHCR_IE 0x00000004
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 5b1c9848124c..f92c0a43c54c 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -96,7 +96,7 @@ struct shdma_ops {
dma_addr_t (*slave_addr)(struct shdma_chan *);
int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
dma_addr_t, dma_addr_t, size_t *);
- int (*set_slave)(struct shdma_chan *, int, bool);
+ int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
void (*setup_xfer)(struct shdma_chan *, int);
void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
struct shdma_desc *(*embedded_desc)(void *, int);
@@ -116,7 +116,6 @@ struct shdma_dev {
int shdma_request_irq(struct shdma_chan *, int,
unsigned long, const char *);
-void shdma_free_irq(struct shdma_chan *);
bool shdma_reset(struct shdma_dev *sdev);
void shdma_chan_probe(struct shdma_dev *sdev,
struct shdma_chan *schan, int id);
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 30aa0dc60d75..9d55438bc4ad 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -47,6 +47,8 @@ extern int shmem_init(void);
extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
extern struct file *shmem_file_setup(const char *name,
loff_t size, unsigned long flags);
+extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
+ unsigned long flags);
extern int shmem_zero_setup(struct vm_area_struct *);
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
extern void shmem_unlock_mapping(struct address_space *mapping);
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index ac6b8ee07825..68c097077ef0 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -4,39 +4,67 @@
/*
* This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
+ *
+ * The 'gfpmask' refers to the allocation we are currently trying to
+ * fulfil.
*/
struct shrink_control {
gfp_t gfp_mask;
- /* How many slab objects shrinker() should scan and try to reclaim */
+ /*
+ * How many objects scan_objects should scan and try to reclaim.
+ * This is reset before every call, so it is safe for callees
+ * to modify.
+ */
unsigned long nr_to_scan;
+
+ /* shrink from these nodes */
+ nodemask_t nodes_to_scan;
+ /* current node being shrunk (for NUMA aware shrinkers) */
+ int nid;
};
+#define SHRINK_STOP (~0UL)
/*
* A callback you can register to apply pressure to ageable caches.
*
- * 'sc' is passed shrink_control which includes a count 'nr_to_scan'
- * and a 'gfpmask'. It should look through the least-recently-used
- * 'nr_to_scan' entries and attempt to free them up. It should return
- * the number of objects which remain in the cache. If it returns -1, it means
- * it cannot do any scanning at this time (eg. there is a risk of deadlock).
+ * @count_objects should return the number of freeable items in the cache. If
+ * there are no objects to free or the number of freeable items cannot be
+ * determined, it should return 0. No deadlock checks should be done during the
+ * count callback - the shrinker relies on aggregating scan counts that couldn't
+ * be executed due to potential deadlocks to be run at a later call when the
+ * deadlock condition is no longer pending.
*
- * The 'gfpmask' refers to the allocation we are currently trying to
- * fulfil.
+ * @scan_objects will only be called if @count_objects returned a non-zero
+ * value for the number of freeable objects. The callout should scan the cache
+ * and attempt to free items from the cache. It should then return the number
+ * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
+ * due to potential deadlocks. If SHRINK_STOP is returned, then no further
+ * attempts to call the @scan_objects will be made from the current reclaim
+ * context.
*
- * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
- * querying the cache size, so a fastpath for that case is appropriate.
+ * @flags determine the shrinker abilities, like numa awareness
*/
struct shrinker {
- int (*shrink)(struct shrinker *, struct shrink_control *sc);
+ unsigned long (*count_objects)(struct shrinker *,
+ struct shrink_control *sc);
+ unsigned long (*scan_objects)(struct shrinker *,
+ struct shrink_control *sc);
+
int seeks; /* seeks to recreate an obj */
long batch; /* reclaim batch size, 0 = default */
+ unsigned long flags;
/* These are for internal use */
struct list_head list;
- atomic_long_t nr_in_batch; /* objs pending delete */
+ /* objs pending delete, per node */
+ atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
-extern void register_shrinker(struct shrinker *);
+
+/* Flags */
+#define SHRINKER_NUMA_AWARE (1 << 0)
+
+extern int register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
#endif
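Usage note (not part of the patch): shrinkers now split the old shrink() callback into count_objects() and scan_objects(), with SHRINK_STOP signalling that no progress can be made in this context. A sketch against a hypothetical object cache (my_cache_* and its mutex are invented):

#include <linux/mutex.h>
#include <linux/shrinker.h>

extern struct mutex my_cache_lock;
extern unsigned long my_cache_nr_objects(void);
extern unsigned long my_cache_evict(unsigned long nr);

static unsigned long my_cache_count(struct shrinker *s,
				    struct shrink_control *sc)
{
	return my_cache_nr_objects();	/* 0 means "nothing to reclaim" */
}

static unsigned long my_cache_scan(struct shrinker *s,
				   struct shrink_control *sc)
{
	unsigned long freed;

	if (!mutex_trylock(&my_cache_lock))
		return SHRINK_STOP;	/* cannot make progress safely right now */

	freed = my_cache_evict(sc->nr_to_scan);
	mutex_unlock(&my_cache_lock);
	return freed;
}

static struct shrinker my_cache_shrinker = {
	.count_objects	= my_cache_count,
	.scan_objects	= my_cache_scan,
	.seeks		= DEFAULT_SEEKS,
};

Note that register_shrinker() now returns int, so the registration in the cache's init path should check for failure.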
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2ddb48d9312c..215b5ea1cb30 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -318,9 +318,13 @@ enum {
SKB_GSO_GRE = 1 << 6,
- SKB_GSO_UDP_TUNNEL = 1 << 7,
+ SKB_GSO_IPIP = 1 << 7,
- SKB_GSO_MPLS = 1 << 8,
+ SKB_GSO_SIT = 1 << 8,
+
+ SKB_GSO_UDP_TUNNEL = 1 << 9,
+
+ SKB_GSO_MPLS = 1 << 10,
};
#if BITS_PER_LONG > 32
@@ -333,11 +337,6 @@ typedef unsigned int sk_buff_data_t;
typedef unsigned char *sk_buff_data_t;
#endif
-#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
- defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
-#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
-#endif
-
/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
@@ -370,7 +369,6 @@ typedef unsigned char *sk_buff_data_t;
* @protocol: Packet protocol from driver
* @destructor: Destruct function
* @nfct: Associated connection, if any
- * @nfct_reasm: netfilter conntrack re-assembly pointer
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @skb_iif: ifindex of device we arrived on
* @tc_index: Traffic control index
@@ -459,9 +457,6 @@ struct sk_buff {
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack *nfct;
#endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
- struct sk_buff *nfct_reasm;
-#endif
#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info *nf_bridge;
#endif
@@ -498,7 +493,7 @@ struct sk_buff {
* headers if needed
*/
__u8 encapsulation:1;
- /* 7/9 bit hole (depending on ndisc_nodetype presence) */
+ /* 6/8 bit hole (depending on ndisc_nodetype presence) */
kmemcheck_bitfield_end(flags2);
#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -585,8 +580,8 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
skb->_skb_refdst = (unsigned long)dst;
}
-extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
- bool force);
+void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
+ bool force);
/**
* skb_dst_set_noref - sets skb dst, hopefully, without taking reference
@@ -634,20 +629,20 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
return (struct rtable *)skb_dst(skb);
}
-extern void kfree_skb(struct sk_buff *skb);
-extern void kfree_skb_list(struct sk_buff *segs);
-extern void skb_tx_error(struct sk_buff *skb);
-extern void consume_skb(struct sk_buff *skb);
-extern void __kfree_skb(struct sk_buff *skb);
+void kfree_skb(struct sk_buff *skb);
+void kfree_skb_list(struct sk_buff *segs);
+void skb_tx_error(struct sk_buff *skb);
+void consume_skb(struct sk_buff *skb);
+void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
-extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
-extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
- bool *fragstolen, int *delta_truesize);
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ bool *fragstolen, int *delta_truesize);
-extern struct sk_buff *__alloc_skb(unsigned int size,
- gfp_t priority, int flags, int node);
-extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
+struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+ int node);
+struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
{
@@ -660,41 +655,33 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}
-extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
+struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
return __alloc_skb_head(priority, -1);
}
-extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
-extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
-extern struct sk_buff *skb_clone(struct sk_buff *skb,
- gfp_t priority);
-extern struct sk_buff *skb_copy(const struct sk_buff *skb,
- gfp_t priority);
-extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
- int headroom, gfp_t gfp_mask);
-
-extern int pskb_expand_head(struct sk_buff *skb,
- int nhead, int ntail,
- gfp_t gfp_mask);
-extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
- unsigned int headroom);
-extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
- int newheadroom, int newtailroom,
- gfp_t priority);
-extern int skb_to_sgvec(struct sk_buff *skb,
- struct scatterlist *sg, int offset,
- int len);
-extern int skb_cow_data(struct sk_buff *skb, int tailbits,
- struct sk_buff **trailer);
-extern int skb_pad(struct sk_buff *skb, int pad);
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
+struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);
+
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+ unsigned int headroom);
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
+ int newtailroom, gfp_t priority);
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
+ int len);
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
+int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) consume_skb(a)
-extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
- int getfrag(void *from, char *to, int offset,
- int len,int odd, struct sk_buff *skb),
- void *from, int length);
+int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length);
struct skb_seq_state {
__u32 lower_offset;
@@ -706,18 +693,17 @@ struct skb_seq_state {
__u8 *frag_data;
};
-extern void skb_prepare_seq_read(struct sk_buff *skb,
- unsigned int from, unsigned int to,
- struct skb_seq_state *st);
-extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
- struct skb_seq_state *st);
-extern void skb_abort_seq_read(struct skb_seq_state *st);
+void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
+ unsigned int to, struct skb_seq_state *st);
+unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
+ struct skb_seq_state *st);
+void skb_abort_seq_read(struct skb_seq_state *st);
-extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
- unsigned int to, struct ts_config *config,
- struct ts_state *state);
+unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ unsigned int to, struct ts_config *config,
+ struct ts_state *state);
-extern void __skb_get_rxhash(struct sk_buff *skb);
+void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
if (!skb->l4_rxhash)
@@ -1095,7 +1081,8 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
* The "__skb_xxxx()" functions are the non-atomic ones that
* can only be called with interrupts disabled.
*/
-extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
+ struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
@@ -1201,8 +1188,8 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
__skb_insert(newsk, prev, prev->next, list);
}
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
- struct sk_buff_head *list);
+void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+ struct sk_buff_head *list);
static inline void __skb_queue_before(struct sk_buff_head *list,
struct sk_buff *next,
@@ -1221,7 +1208,7 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
*
* A buffer cannot be placed on two lists at the same time.
*/
-extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
struct sk_buff *newsk)
{
@@ -1238,7 +1225,7 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
*
* A buffer cannot be placed on two lists at the same time.
*/
-extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
struct sk_buff *newsk)
{
@@ -1249,7 +1236,7 @@ static inline void __skb_queue_tail(struct sk_buff_head *list,
* remove sk_buff from list. _Must_ be called atomically, and with
* the list known..
*/
-extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff *next, *prev;
@@ -1270,7 +1257,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
* so must be used with appropriate locks held only. The head item is
* returned or %NULL if the list is empty.
*/
-extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek(list);
@@ -1287,7 +1274,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
* so must be used with appropriate locks held only. The tail item is
* returned or %NULL if the list is empty.
*/
-extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek_tail(list);
@@ -1361,7 +1348,7 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
* @size: the length of the data
*
* As per __skb_fill_page_desc() -- initialises the @i'th fragment of
- * @skb to point to &size bytes at offset @off within @page. In
+ * @skb to point to @size bytes at offset @off within @page. In
* addition updates @skb such that @i is the last fragment.
*
* Does not take any additional reference on the fragment.
@@ -1373,8 +1360,11 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
skb_shinfo(skb)->nr_frags = i + 1;
}
-extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
- int off, int size, unsigned int truesize);
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+ int size, unsigned int truesize);
+
+void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
+ unsigned int truesize);
#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
@@ -1418,7 +1408,8 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
/*
* Add data to an sk_buff
*/
-extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
+unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
+unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp = skb_tail_pointer(skb);
@@ -1428,7 +1419,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
return tmp;
}
-extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
@@ -1436,7 +1427,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
return skb->data;
}
-extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
skb->len -= len;
@@ -1449,7 +1440,7 @@ static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int l
return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
-extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
@@ -1753,7 +1744,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
#endif
-extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+int ___pskb_trim(struct sk_buff *skb, unsigned int len);
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
@@ -1765,7 +1756,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
skb_set_tail_pointer(skb, len);
}
-extern void skb_trim(struct sk_buff *skb, unsigned int len);
+void skb_trim(struct sk_buff *skb, unsigned int len);
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
@@ -1838,7 +1829,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
* the list and one reference dropped. This function does not take the
* list lock and the caller must hold the relevant locks to use it.
*/
-extern void skb_queue_purge(struct sk_buff_head *list);
+void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
@@ -1850,11 +1841,10 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
-extern void *netdev_alloc_frag(unsigned int fragsz);
+void *netdev_alloc_frag(unsigned int fragsz);
-extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
- unsigned int length,
- gfp_t gfp_mask);
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
+ gfp_t gfp_mask);
/**
* netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -2071,6 +2061,8 @@ static inline void skb_frag_set_page(struct sk_buff *skb, int f,
__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
+
/**
* skb_frag_dma_map - maps a paged fragment via the DMA API
* @dev: the device to map the fragment to
@@ -2342,60 +2334,49 @@ static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
#define skb_walk_frags(skb, iter) \
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
-extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
- int *peeked, int *off, int *err);
-extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
- int noblock, int *err);
-extern unsigned int datagram_poll(struct file *file, struct socket *sock,
- struct poll_table_struct *wait);
-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
- int offset, struct iovec *to,
- int size);
-extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
- int hlen,
- struct iovec *iov);
-extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
- int offset,
- const struct iovec *from,
- int from_offset,
- int len);
-extern int zerocopy_sg_from_iovec(struct sk_buff *skb,
- const struct iovec *frm,
- int offset,
- size_t count);
-extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
- int offset,
- const struct iovec *to,
- int to_offset,
- int size);
-extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-extern void skb_free_datagram_locked(struct sock *sk,
- struct sk_buff *skb);
-extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
- unsigned int flags);
-extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
- int len, __wsum csum);
-extern int skb_copy_bits(const struct sk_buff *skb, int offset,
- void *to, int len);
-extern int skb_store_bits(struct sk_buff *skb, int offset,
- const void *from, int len);
-extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
- int offset, u8 *to, int len,
- __wsum csum);
-extern int skb_splice_bits(struct sk_buff *skb,
- unsigned int offset,
- struct pipe_inode_info *pipe,
- unsigned int len,
- unsigned int flags);
-extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
-extern void skb_split(struct sk_buff *skb,
- struct sk_buff *skb1, const u32 len);
-extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
- int shiftlen);
-extern void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-
-extern struct sk_buff *skb_segment(struct sk_buff *skb,
- netdev_features_t features);
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+ int *peeked, int *off, int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+ int *err);
+unsigned int datagram_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
+ struct iovec *to, int size);
+int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
+ struct iovec *iov);
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+ const struct iovec *from, int from_offset,
+ int len);
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
+ int offset, size_t count);
+int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
+ const struct iovec *to, int to_offset,
+ int size);
+void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
+int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
+__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
+ int len, __wsum csum);
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ struct pipe_inode_info *pipe, unsigned int len,
+ unsigned int flags);
+void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
+void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+
+struct skb_checksum_ops {
+ __wsum (*update)(const void *mem, int len, __wsum wsum);
+ __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
+};
+
+__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+ __wsum csum, const struct skb_checksum_ops *ops);
+__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
+ __wsum csum);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
@@ -2440,7 +2421,7 @@ static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
memcpy(skb->data + offset, from, len);
}
-extern void skb_init(void);
+void skb_init(void);
static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
@@ -2483,12 +2464,12 @@ static inline ktime_t net_invalid_timestamp(void)
return ktime_set(0, 0);
}
-extern void skb_timestamping_init(void);
+void skb_timestamping_init(void);
#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
-extern void skb_clone_tx_timestamp(struct sk_buff *skb);
-extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
+void skb_clone_tx_timestamp(struct sk_buff *skb);
+bool skb_defer_rx_timestamp(struct sk_buff *skb);
#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
@@ -2529,8 +2510,8 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
* generates a software time stamp (otherwise), then queues the clone
* to the error queue of the socket. Errors are silently ignored.
*/
-extern void skb_tstamp_tx(struct sk_buff *orig_skb,
- struct skb_shared_hwtstamps *hwtstamps);
+void skb_tstamp_tx(struct sk_buff *orig_skb,
+ struct skb_shared_hwtstamps *hwtstamps);
static inline void sw_tx_timestamp(struct sk_buff *skb)
{
@@ -2562,8 +2543,8 @@ static inline void skb_tx_timestamp(struct sk_buff *skb)
*/
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
-extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
-extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
+__sum16 __skb_checksum_complete(struct sk_buff *skb);
static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
@@ -2593,7 +2574,7 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
+void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
if (nfct && atomic_dec_and_test(&nfct->use))
@@ -2605,18 +2586,6 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
atomic_inc(&nfct->use);
}
#endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
-static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
-{
- if (skb)
- atomic_inc(&skb->users);
-}
-static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
-{
- if (skb)
- kfree_skb(skb);
-}
-#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
@@ -2635,10 +2604,6 @@ static inline void nf_reset(struct sk_buff *skb)
nf_conntrack_put(skb->nfct);
skb->nfct = NULL;
#endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
- nf_conntrack_put_reasm(skb->nfct_reasm);
- skb->nfct_reasm = NULL;
-#endif
#ifdef CONFIG_BRIDGE_NETFILTER
nf_bridge_put(skb->nf_bridge);
skb->nf_bridge = NULL;
@@ -2660,10 +2625,6 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
nf_conntrack_get(src->nfct);
dst->nfctinfo = src->nfctinfo;
#endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
- dst->nfct_reasm = src->nfct_reasm;
- nf_conntrack_get_reasm(src->nfct_reasm);
-#endif
#ifdef CONFIG_BRIDGE_NETFILTER
dst->nf_bridge = src->nf_bridge;
nf_bridge_get(src->nf_bridge);
@@ -2675,9 +2636,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(dst->nfct);
#endif
-#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
- nf_conntrack_put_reasm(dst->nfct_reasm);
-#endif
#ifdef CONFIG_BRIDGE_NETFILTER
nf_bridge_put(dst->nf_bridge);
#endif
@@ -2732,28 +2690,27 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
return skb->queue_mapping != 0;
}
-extern u16 __skb_tx_hash(const struct net_device *dev,
- const struct sk_buff *skb,
- unsigned int num_tx_queues);
+u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+ unsigned int num_tx_queues);
-#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
+#ifdef CONFIG_XFRM
return skb->sp;
-}
#else
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
-{
return NULL;
-}
#endif
+}
/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of tunneling protocols, e.g. GRE.
* For non-tunnel skb it points to skb_mac_header() and for
- * tunnel skb it points to outer mac header. */
+ * tunnel skb it points to outer mac header.
+ * encap_level keeps track of the level of encapsulation of network headers.
+ */
struct skb_gso_cb {
- int mac_offset;
+ int mac_offset;
+ int encap_level;
};
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
@@ -2783,12 +2740,13 @@ static inline bool skb_is_gso(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_size;
}
+/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}
-extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+void __skb_warn_lro_forwarding(const struct sk_buff *skb);
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
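
For illustration, a hedged sketch of how a caller might supply its own
struct skb_checksum_ops to the new __skb_checksum() declared above; the
my_* wrappers are hypothetical and simply reuse the standard Internet
checksum primitives:

static __wsum my_csum_update(const void *mem, int len, __wsum wsum)
{
	return csum_partial(mem, len, wsum);
}

static __wsum my_csum_combine(__wsum csum, __wsum csum2, int offset, int len)
{
	return csum_block_add(csum, csum2, offset);
}

static const struct skb_checksum_ops my_csum_ops = {
	.update  = my_csum_update,
	.combine = my_csum_combine,
};

static __wsum my_skb_checksum(const struct sk_buff *skb, int offset, int len)
{
	/* Walks linear data, frags and the frag_list using the ops above. */
	return __skb_checksum(skb, offset, len, 0, &my_csum_ops);
}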
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0ea8713..1e2f4fe12773 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -4,6 +4,8 @@
* (C) SGI 2006, Christoph Lameter
* Cleaned up and restructured to ease the addition of alternative
* implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ * Unified interface for all slab allocators
*/
#ifndef _LINUX_SLAB_H
@@ -51,7 +53,14 @@
* }
* rcu_read_unlock();
*
- * See also the comment on struct slab_rcu in mm/slab.c.
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * only if we can be sure that the memory has not been meanwhile reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
*/
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
@@ -94,6 +103,7 @@
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
(unsigned long)ZERO_SIZE_PTR)
+#include <linux/kmemleak.h>
struct mem_cgroup;
/*
@@ -289,6 +299,57 @@ static __always_inline int kmalloc_index(size_t size)
}
#endif /* !CONFIG_SLOB */
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+ return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size)
+{
+ return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+ gfp_t flags, size_t size)
+{
+ return kmem_cache_alloc(s, flags);
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size)
+{
+ return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif /* CONFIG_TRACING */
+
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif
@@ -297,9 +358,105 @@ static __always_inline int kmalloc_index(size_t size)
#include <linux/slub_def.h>
#endif
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+ void *ret;
+
+ flags |= (__GFP_COMP | __GFP_KMEMCG);
+ ret = (void *) __get_free_pages(flags, order);
+ kmemleak_alloc(ret, size, 1, flags);
+ return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+ return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+ unsigned int order = get_order(size);
+ return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ *
+ * The @flags argument may be one of:
+ *
+ * %GFP_USER - Allocate memory on behalf of user. May sleep.
+ *
+ * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
+ *
+ * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
+ * For example, use this inside interrupt handlers.
+ *
+ * %GFP_HIGHUSER - Allocate pages from high memory.
+ *
+ * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
+ *
+ * %GFP_NOFS - Do not make any fs calls while trying to get memory.
+ *
+ * %GFP_NOWAIT - Allocation will not sleep.
+ *
+ * %GFP_THISNODE - Allocate node-local memory only.
+ *
+ * %GFP_DMA - Allocation suitable for DMA.
+ * Should only be used for kmalloc() caches. Otherwise, use a
+ * slab created with SLAB_DMA.
+ *
+ * Also it is possible to set different flags by OR'ing
+ * in one or more of the following additional @flags:
+ *
+ * %__GFP_COLD - Request cache-cold pages instead of
+ * trying to return cache-warm pages.
+ *
+ * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
+ *
+ * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
+ * (think twice before using).
+ *
+ * %__GFP_NORETRY - If memory is not immediately available,
+ * then give up at once.
+ *
+ * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
+ *
+ * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
+ *
+ * There are other flags available as well, but these are not intended
+ * for general use, and so are not documented here. For a full list of
+ * potential flags, always refer to linux/gfp.h.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+ if (__builtin_constant_p(size)) {
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+ if (!(flags & GFP_DMA)) {
+ int index = kmalloc_index(size);
+
+ if (!index)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_trace(kmalloc_caches[index],
+ flags, size);
+ }
#endif
+ }
+ return __kmalloc(size, flags);
+}
/*
* Determine size used for the nth kmalloc cache.
@@ -321,6 +478,23 @@ static __always_inline int kmalloc_size(int n)
return 0;
}
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+ if (__builtin_constant_p(size) &&
+ size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+ int i = kmalloc_index(size);
+
+ if (!i)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+ flags, node, size);
+ }
+#endif
+ return __kmalloc_node(size, flags, node);
+}
+
/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
* Intended for arches that get misalignment faults even for 64 bit integer
@@ -373,61 +547,6 @@ int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);
/**
- * kmalloc - allocate memory
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The @flags argument may be one of:
- *
- * %GFP_USER - Allocate memory on behalf of user. May sleep.
- *
- * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
- *
- * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
- * For example, use this inside interrupt handlers.
- *
- * %GFP_HIGHUSER - Allocate pages from high memory.
- *
- * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
- *
- * %GFP_NOFS - Do not make any fs calls while trying to get memory.
- *
- * %GFP_NOWAIT - Allocation will not sleep.
- *
- * %GFP_THISNODE - Allocate node-local memory only.
- *
- * %GFP_DMA - Allocation suitable for DMA.
- * Should only be used for kmalloc() caches. Otherwise, use a
- * slab created with SLAB_DMA.
- *
- * Also it is possible to set different flags by OR'ing
- * in one or more of the following additional @flags:
- *
- * %__GFP_COLD - Request cache-cold pages instead of
- * trying to return cache-warm pages.
- *
- * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
- *
- * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
- * (think twice before using).
- *
- * %__GFP_NORETRY - If memory is not immediately available,
- * then give up at once.
- *
- * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
- *
- * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
- *
- * There are other flags available as well, but these are not intended
- * for general use, and so are not documented here. For a full list of
- * potential flags, always refer to linux/gfp.h.
- *
- * kmalloc is the normal method of allocating memory
- * in the kernel.
- */
-static __always_inline void *kmalloc(size_t size, gfp_t flags);
-
-/**
* kmalloc_array - allocate memory for an array.
* @n: number of elements.
* @size: element size.
@@ -451,36 +570,6 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
return kmalloc_array(n, size, flags | __GFP_ZERO);
}
-#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
-/**
- * kmalloc_node - allocate memory from a specific node
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: node to allocate from.
- *
- * kmalloc() for non-local nodes, used to allocate from a specific node
- * if available. Equivalent to kmalloc() in the non-NUMA single-node
- * case.
- */
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return kmalloc(size, flags);
-}
-
-static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc(size, flags);
-}
-
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
- gfp_t flags, int node)
-{
- return kmem_cache_alloc(cachep, flags);
-}
-#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
-
/*
* kmalloc_track_caller is a special version of kmalloc that records the
* calling function of the routine calling it for slab leak tracking instead
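
For illustration, the common allocation pattern described by the kmalloc()
kerneldoc that now lives in this header; struct foo and the foo_* helpers
are hypothetical and not part of this patch:

struct foo {
	int bar;
};

static struct foo *foo_create(int node)
{
	struct foo *f;

	/* GFP_KERNEL may sleep; use GFP_ATOMIC from atomic context. */
	f = kmalloc_node(sizeof(*f), GFP_KERNEL, node);
	if (!f)
		return NULL;

	f->bar = 0;
	return f;
}

static void foo_destroy(struct foo *f)
{
	kfree(f);
}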
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cd401580bdd3..09bfffb08a56 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -3,20 +3,6 @@
/*
* Definitions unique to the original Linux SLAB allocator.
- *
- * What we provide here is a way to optimize the frequent kmalloc
- * calls in the kernel by selecting the appropriate general cache
- * if kmalloc was called with a size that can be established at
- * compile time.
- */
-
-#include <linux/init.h>
-#include <linux/compiler.h>
-
-/*
- * struct kmem_cache
- *
- * manages a cache.
*/
struct kmem_cache {
@@ -41,8 +27,8 @@ struct kmem_cache {
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
- struct kmem_cache *slabp_cache;
- unsigned int slab_size;
+ struct kmem_cache *freelist_cache;
+ unsigned int freelist_size;
/* constructor func */
void (*ctor)(void *obj);
@@ -102,96 +88,4 @@ struct kmem_cache {
*/
};
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
- return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- struct kmem_cache *cachep;
- void *ret;
-
- if (__builtin_constant_p(size)) {
- int i;
-
- if (!size)
- return ZERO_SIZE_PTR;
-
- if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
- return NULL;
-
- i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
- if (flags & GFP_DMA)
- cachep = kmalloc_dma_caches[i];
- else
-#endif
- cachep = kmalloc_caches[i];
-
- ret = kmem_cache_alloc_trace(cachep, flags, size);
-
- return ret;
- }
- return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid,
- size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid,
- size_t size)
-{
- return kmem_cache_alloc_node(cachep, flags, nodeid);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- struct kmem_cache *cachep;
-
- if (__builtin_constant_p(size)) {
- int i;
-
- if (!size)
- return ZERO_SIZE_PTR;
-
- if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
- return NULL;
-
- i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
- if (flags & GFP_DMA)
- cachep = kmalloc_dma_caches[i];
- else
-#endif
- cachep = kmalloc_caches[i];
-
- return kmem_cache_alloc_node_trace(cachep, flags, node, size);
- }
- return __kmalloc_node(size, flags, node);
-}
-
-#endif /* CONFIG_NUMA */
-
#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
deleted file mode 100644
index 095a5a4a8516..000000000000
--- a/include/linux/slob_def.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __LINUX_SLOB_DEF_H
-#define __LINUX_SLOB_DEF_H
-
-#include <linux/numa.h>
-
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
- gfp_t flags)
-{
- return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc_node(size, flags, node);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- return __kmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
-{
- return kmalloc(size, flags);
-}
-
-#endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 027276fa8713..f56bfa9e4526 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -6,18 +6,12 @@
*
* (C) 2007 SGI, Christoph Lameter
*/
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/bug.h>
-#include <linux/workqueue.h>
#include <linux/kobject.h>
-#include <linux/kmemleak.h>
-
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
- FREE_FASTPATH, /* Free to cpu slub */
+ FREE_FASTPATH, /* Free to cpu slab */
FREE_SLOWPATH, /* Freeing not to cpu slab */
FREE_FROZEN, /* Freeing to frozen slab */
FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
@@ -104,108 +98,4 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
- void *ret;
-
- flags |= (__GFP_COMP | __GFP_KMEMCG);
- ret = (void *) __get_free_pages(flags, order);
- kmemleak_alloc(ret, size, 1, flags);
- return ret;
-}
-
-/**
- * Calling this on allocated memory will check that the memory
- * is expected to be in use, and print warnings if not.
- */
-#ifdef CONFIG_SLUB_DEBUG
-extern bool verify_mem_not_deleted(const void *x);
-#else
-static inline bool verify_mem_not_deleted(const void *x)
-{
- return true;
-}
-#endif
-
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
- return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
- return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
- unsigned int order = get_order(size);
- return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- if (__builtin_constant_p(size)) {
- if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large(size, flags);
-
- if (!(flags & GFP_DMA)) {
- int index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_trace(kmalloc_caches[index],
- flags, size);
- }
- }
- return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
-{
- return kmem_cache_alloc_node(s, gfpflags, node);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- if (__builtin_constant_p(size) &&
- size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
- int index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_node_trace(kmalloc_caches[index],
- flags, node, size);
- }
- return __kmalloc_node(size, flags, node);
-}
-#endif
-
#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c181399f2c20..5da22ee42e16 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -28,6 +28,30 @@ extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
int wait);
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
+
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+ void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags);
+
+void __smp_call_function_single(int cpuid, struct call_single_data *data,
+ int wait);
+
#ifdef CONFIG_SMP
#include <linux/preempt.h>
@@ -74,9 +98,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
smp_call_func_t func, void *info, bool wait);
-void __smp_call_function_single(int cpuid, struct call_single_data *data,
- int wait);
-
int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait);
@@ -85,35 +106,10 @@ void kick_all_cpus_sync(void);
/*
* Generic and arch helpers
*/
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
generic_smp_call_function_single_interrupt
-#else
-static inline void call_function_init(void) { }
-#endif
-
-/*
- * Call a function on all processors
- */
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
-
-/*
- * Call a function on processors specified by mask, which might include
- * the local one.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
- void *info, bool wait);
-
-/*
- * Call a function on each processor for which the supplied function
- * cond_func returns a positive value. This may include the local
- * processor.
- */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
- smp_call_func_t func, void *info, bool wait,
- gfp_t gfp_flags);
/*
* Mark the boot cpu "online" so that it can call console drivers in
@@ -139,43 +135,6 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
}
#define smp_call_function(func, info, wait) \
(up_smp_call_function(func, info))
-#define on_each_cpu(func, info, wait) \
- ({ \
- unsigned long __flags; \
- local_irq_save(__flags); \
- func(info); \
- local_irq_restore(__flags); \
- 0; \
- })
-/*
- * Note we still need to test the mask even for UP
- * because we actually can get an empty mask from
- * code that on SMP might call us without the local
- * CPU in the mask.
- */
-#define on_each_cpu_mask(mask, func, info, wait) \
- do { \
- if (cpumask_test_cpu(0, (mask))) { \
- local_irq_disable(); \
- (func)(info); \
- local_irq_enable(); \
- } \
- } while (0)
-/*
- * Preemption is disabled here to make sure the cond_func is called under the
- * same condtions in UP and SMP.
- */
-#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
- do { \
- void *__info = (info); \
- preempt_disable(); \
- if ((cond_func)(0, __info)) { \
- local_irq_disable(); \
- (func)(__info); \
- local_irq_enable(); \
- } \
- preempt_enable(); \
- } while (0)
static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu() do {} while (0)
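
For illustration, a small sketch of the on_each_cpu() pattern documented
above, which now compiles the same way on SMP and UP builds; the per-CPU
counter and helper names are hypothetical:

static DEFINE_PER_CPU(unsigned long, my_cache_entries);

static void flush_local_cache(void *info)
{
	/* Runs with interrupts disabled on every CPU, so it must not sleep. */
	__this_cpu_write(my_cache_entries, 0);
}

static void flush_all_caches(void)
{
	/* wait=1: return only after every CPU has executed the callback. */
	on_each_cpu(flush_local_cache, NULL, 1);
}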
diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h
index 32be8dbdf191..274bc0fa00af 100644
--- a/include/linux/spi/mmc_spi.h
+++ b/include/linux/spi/mmc_spi.h
@@ -7,6 +7,11 @@
struct device;
struct mmc_host;
+#define MMC_SPI_USE_CD_GPIO (1 << 0)
+#define MMC_SPI_USE_RO_GPIO (1 << 1)
+#define MMC_SPI_CD_GPIO_ACTIVE_LOW (1 << 2)
+#define MMC_SPI_RO_GPIO_ACTIVE_LOW (1 << 3)
+
/* Put this in platform_data of a device being used to manage an MMC/SD
* card slot. (Modeled after PXA mmc glue; see that for usage examples.)
*
@@ -21,17 +26,19 @@ struct mmc_spi_platform_data {
void *);
void (*exit)(struct device *, void *);
- /* sense switch on sd cards */
- int (*get_ro)(struct device *);
-
/*
- * If board does not use CD interrupts, driver can optimize polling
- * using this function.
+ * Card Detect and Read Only GPIOs. To enable debouncing on the card
+ * detect GPIO, set the cd_debounce to the debounce time in
+ * microseconds.
*/
- int (*get_cd)(struct device *);
+ unsigned int flags;
+ unsigned int cd_gpio;
+ unsigned int cd_debounce;
+ unsigned int ro_gpio;
/* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */
unsigned long caps;
+ unsigned long caps2;
/* how long to debounce card detect, in msecs */
u16 detect_delay;
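
For illustration, a hedged board-file sketch using the new GPIO-based
card-detect and read-only description; the GPIO numbers (and the board
itself) are hypothetical:

static struct mmc_spi_platform_data my_board_mmc_pdata = {
	.flags		= MMC_SPI_USE_CD_GPIO | MMC_SPI_USE_RO_GPIO |
			  MMC_SPI_CD_GPIO_ACTIVE_LOW,
	.cd_gpio	= 57,		/* card-detect switch */
	.cd_debounce	= 1000,		/* debounce time, in microseconds */
	.ro_gpio	= 58,		/* write-protect switch */
	.caps		= MMC_CAP_NEEDS_POLL,
};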
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
index 900f0e328235..a25bd6f65e7f 100644
--- a/include/linux/spi/rspi.h
+++ b/include/linux/spi/rspi.h
@@ -26,6 +26,8 @@ struct rspi_plat_data {
unsigned int dma_rx_id;
unsigned dma_width_16bit:1; /* DMAC read/write width = 16-bit */
+
+ u16 num_chipselect;
};
#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 887116dbce2c..8c62ba74dd91 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,7 @@
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/kthread.h>
+#include <linux/completion.h>
/*
* INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -150,8 +151,7 @@ static inline void *spi_get_drvdata(struct spi_device *spi)
}
struct spi_message;
-
-
+struct spi_transfer;
/**
* struct spi_driver - Host side "protocol" driver
@@ -257,6 +257,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
 * @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
* @cur_msg: the currently in-flight message
+ * @cur_msg_prepared: spi_prepare_message was called for the currently
+ * in-flight message
+ * @xfer_completion: used by core transfer_one_message()
* @busy: message pump is busy
* @running: message pump is running
* @rt: whether this queue is set to run as a realtime task
@@ -274,6 +277,16 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @unprepare_transfer_hardware: there are currently no more messages on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
+ * @set_cs: assert or deassert chip select, true to assert. May be called
+ * from interrupt context.
+ * @prepare_message: set up the controller to transfer a single message,
+ * for example doing DMA mapping. Called from threaded
+ * context.
+ * @transfer_one: transfer a single spi_transfer. When the
+ * driver is finished with this transfer it must call
+ * spi_finalize_current_transfer() so the subsystem can issue
+ * the next transfer
+ * @unprepare_message: undo any work done by prepare_message().
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself).
@@ -388,11 +401,25 @@ struct spi_master {
bool running;
bool rt;
bool auto_runtime_pm;
+ bool cur_msg_prepared;
+ struct completion xfer_completion;
int (*prepare_transfer_hardware)(struct spi_master *master);
int (*transfer_one_message)(struct spi_master *master,
struct spi_message *mesg);
int (*unprepare_transfer_hardware)(struct spi_master *master);
+ int (*prepare_message)(struct spi_master *master,
+ struct spi_message *message);
+ int (*unprepare_message)(struct spi_master *master,
+ struct spi_message *message);
+
+ /*
+ * These hooks are for drivers that use a generic implementation
+ * of transfer_one_message() provided by the core.
+ */
+ void (*set_cs)(struct spi_device *spi, bool enable);
+ int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *transfer);
/* gpio chip select */
int *cs_gpios;
@@ -428,12 +455,15 @@ extern int spi_master_resume(struct spi_master *master);
/* Calls the driver make to interact with the message queue */
extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
extern void spi_finalize_current_message(struct spi_master *master);
+extern void spi_finalize_current_transfer(struct spi_master *master);
/* the spi driver core manages memory for the spi_master classdev */
extern struct spi_master *
spi_alloc_master(struct device *host, unsigned size);
extern int spi_register_master(struct spi_master *master);
+extern int devm_spi_register_master(struct device *dev,
+ struct spi_master *master);
extern void spi_unregister_master(struct spi_master *master);
extern struct spi_master *spi_busnum_to_master(u16 busnum);
@@ -823,6 +853,33 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
return (status < 0) ? status : result;
}
+/**
+ * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read
+ * @spi: device with which data will be exchanged
+ * @cmd: command to be written before data is read back
+ * Context: can sleep
+ *
+ * This returns the (unsigned) sixteen bit number returned by the device in cpu
+ * endianness, or else a negative error code. Callable only from contexts that
+ * can sleep.
+ *
+ * This function is similar to spi_w8r16, with the exception that it will
+ * convert the read 16 bit data word from big-endian to native endianness.
+ *
+ */
+static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
+{
+ ssize_t status;
+ __be16 result;
+
+ status = spi_write_then_read(spi, &cmd, 1, &result, 2);
+ if (status < 0)
+ return status;
+
+ return be16_to_cpu(result);
+}
+
/*---------------------------------------------------------------------------*/
/*
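
For illustration, a hedged sketch of a controller driver built on the new
per-transfer hooks rather than a full transfer_one_message() implementation;
the foo_* names and hardware accessors are hypothetical:

static void foo_spi_set_cs(struct spi_device *spi, bool enable)
{
	/* May be called from interrupt context, so it must not sleep. */
	foo_spi_write_cs_reg(spi_master_get_devdata(spi->master),
			     spi->chip_select, enable);
}

static int foo_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct foo_spi *fs = spi_master_get_devdata(master);

	foo_spi_start_transfer(fs, xfer);
	return 1;	/* still in flight; the core waits on xfer_completion */
}

static irqreturn_t foo_spi_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;

	/* Transfer done: let the core move on to the next spi_transfer. */
	spi_finalize_current_transfer(master);
	return IRQ_HANDLED;
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
	if (!master)
		return -ENOMEM;

	master->set_cs       = foo_spi_set_cs;
	master->transfer_one = foo_spi_transfer_one;

	/* Managed registration; unregisters when the device goes away. */
	return devm_spi_register_master(&pdev->dev, master);
}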
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index c114614ed172..9b058eecd403 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -237,4 +237,18 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
__srcu_read_unlock(sp, idx);
}
+/**
+ * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
+ *
+ * Converts the preceding srcu_read_unlock into a two-way memory barrier.
+ *
+ * Call this after srcu_read_unlock, to guarantee that all memory operations
+ * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
+ * the preceding srcu_read_unlock.
+ */
+static inline void smp_mb__after_srcu_read_unlock(void)
+{
+ /* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
+}
+
#endif
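
For illustration, a hedged sketch of pairing the new helper with
srcu_read_unlock(); my_srcu, my_ptr and the callees are hypothetical:

DEFINE_SRCU(my_srcu);

static void my_reader(void)
{
	struct my_data *p;
	int idx;

	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(my_ptr, &my_srcu);
	consume_data(p);
	srcu_read_unlock(&my_srcu, idx);

	/* Everything below is ordered after the critical section above. */
	smp_mb__after_srcu_read_unlock();
	publish_done_flag();
}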
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
index 86a12b0cb239..0688472500bb 100644
--- a/include/linux/ssb/ssb_driver_gige.h
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -108,6 +108,16 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
return 0;
}
+/* Get the device phy address */
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+ struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ return dev->dev->bus->sprom.et0phyaddr;
+}
+
extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
struct pci_dev *pdev);
extern int ssb_gige_map_irq(struct ssb_device *sdev,
@@ -174,6 +184,10 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
{
return -ENODEV;
}
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+ return -ENODEV;
+}
#endif /* CONFIG_SSB_DRIVER_GIGE */
#endif /* LINUX_SSB_DRIVER_GIGE_H_ */
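
For illustration, a hedged sketch of an Ethernet driver querying the PHY
address through the new helper during probe; the foo_ wrapper and the
fallback address are hypothetical:

static int foo_get_phyaddr(struct pci_dev *pdev)
{
	int addr = ssb_gige_get_phyaddr(pdev);

	/* -ENODEV: not behind an SSB GigE core (or the glue is not built). */
	return addr < 0 ? 1 : addr;
}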
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 3b5e910d14ca..d2abbdb8c6aa 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -28,6 +28,7 @@ struct cpu_stop_work {
};
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
struct cpu_stop_work *work_buf);
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
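
For illustration, a hedged sketch of the new stop_two_cpus() entry point,
which holds exactly two CPUs in the stopper context while one callback runs;
the swap_args structure and the exchange helper are hypothetical:

struct swap_args {
	int src_cpu;
	int dst_cpu;
};

static int swap_state(void *arg)
{
	struct swap_args *sa = arg;

	/* Runs once, with both CPUs stopped. */
	exchange_per_cpu_state(sa->src_cpu, sa->dst_cpu);
	return 0;
}

static int do_swap(unsigned int cpu1, unsigned int cpu2, struct swap_args *sa)
{
	return stop_two_cpus(cpu1, cpu2, swap_state, sa);
}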
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 0dd00f4f6810..790be1472792 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -24,12 +24,21 @@
struct rpcsec_gss_info;
+/* auth_cred ac_flags bits */
+enum {
+ RPC_CRED_NO_CRKEY_TIMEOUT = 0, /* underlying cred has no key timeout */
+ RPC_CRED_KEY_EXPIRE_SOON = 1, /* underlying cred key will expire soon */
+	RPC_CRED_NOTIFY_TIMEOUT = 2,	/* notify generic cred when underlying
+ key will expire soon */
+};
+
/* Work around the lack of a VFS credential */
struct auth_cred {
kuid_t uid;
kgid_t gid;
struct group_info *group_info;
const char *principal;
+ unsigned long ac_flags;
unsigned char machine_cred : 1;
};
@@ -87,6 +96,11 @@ struct rpc_auth {
/* per-flavor data */
};
+struct rpc_auth_create_args {
+ rpc_authflavor_t pseudoflavor;
+ const char *target_name;
+};
+
/* Flags for rpcauth_lookupcred() */
#define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */
@@ -97,17 +111,17 @@ struct rpc_authops {
struct module *owner;
rpc_authflavor_t au_flavor; /* flavor (RPC_AUTH_*) */
char * au_name;
- struct rpc_auth * (*create)(struct rpc_clnt *, rpc_authflavor_t);
+ struct rpc_auth * (*create)(struct rpc_auth_create_args *, struct rpc_clnt *);
void (*destroy)(struct rpc_auth *);
struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int);
struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int);
- int (*pipes_create)(struct rpc_auth *);
- void (*pipes_destroy)(struct rpc_auth *);
int (*list_pseudoflavors)(rpc_authflavor_t *, int);
rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *);
int (*flavor2info)(rpc_authflavor_t,
struct rpcsec_gss_info *);
+ int (*key_timeout)(struct rpc_auth *,
+ struct rpc_cred *);
};
struct rpc_credops {
@@ -124,6 +138,8 @@ struct rpc_credops {
void *, __be32 *, void *);
int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
void *, __be32 *, void *);
+ int (*crkey_timeout)(struct rpc_cred *);
+ bool (*crkey_to_expire)(struct rpc_cred *);
};
extern const struct rpc_authops authunix_ops;
@@ -140,7 +156,8 @@ struct rpc_cred * rpc_lookup_cred(void);
struct rpc_cred * rpc_lookup_machine_cred(const char *service_name);
int rpcauth_register(const struct rpc_authops *);
int rpcauth_unregister(const struct rpc_authops *);
-struct rpc_auth * rpcauth_create(rpc_authflavor_t, struct rpc_clnt *);
+struct rpc_auth * rpcauth_create(struct rpc_auth_create_args *,
+ struct rpc_clnt *);
void rpcauth_release(struct rpc_auth *);
rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t,
struct rpcsec_gss_info *);
@@ -162,6 +179,9 @@ int rpcauth_uptodatecred(struct rpc_task *);
int rpcauth_init_credcache(struct rpc_auth *);
void rpcauth_destroy_credcache(struct rpc_auth *);
void rpcauth_clear_credcache(struct rpc_cred_cache *);
+int rpcauth_key_timeout_notify(struct rpc_auth *,
+ struct rpc_cred *);
+bool rpcauth_cred_key_to_expire(struct rpc_cred *);
static inline
struct rpc_cred * get_rpccred(struct rpc_cred *cred)
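
For illustration, a hedged sketch of the new rpcauth_create() calling
convention, where the flavor and optional GSS target name travel in
struct rpc_auth_create_args; the foo_ wrapper is hypothetical:

static int foo_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor	= flavor,
		.target_name	= NULL,		/* only meaningful for GSS flavors */
	};
	struct rpc_auth *auth;

	auth = rpcauth_create(&auth_args, clnt);
	return IS_ERR(auth) ? PTR_ERR(auth) : 0;
}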
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 6ce690de447f..437ddb6c4aef 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -264,12 +264,30 @@ static inline int get_uint(char **bpp, unsigned int *anint)
return 0;
}
+static inline int get_time(char **bpp, time_t *time)
+{
+ char buf[50];
+ long long ll;
+ int len = qword_get(bpp, buf, sizeof(buf));
+
+ if (len < 0)
+ return -EINVAL;
+ if (len == 0)
+ return -ENOENT;
+
+ if (kstrtoll(buf, 0, &ll))
+ return -EINVAL;
+
+ *time = (time_t)ll;
+ return 0;
+}
+
static inline time_t get_expiry(char **bpp)
{
- int rv;
+ time_t rv;
struct timespec boot;
- if (get_int(bpp, &rv))
+ if (get_time(bpp, &rv))
return 0;
if (rv < 0)
return 0;
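For illustration only: a sketch of how a cache parser might combine the existing get_expiry() with the new get_time() when an upcall line carries more than one timestamp. The example_* name and the field layout are hypothetical.

/* Sketch only: parsing "<expiry> <other-time>" fields from a cache upcall. */
static int example_parse_times(char **bpp, time_t *expiry, time_t *other)
{
	*expiry = get_expiry(bpp);	/* 0 on parse failure */
	if (*expiry == 0)
		return -EINVAL;

	return get_time(bpp, other);	/* -EINVAL / -ENOENT on bad input */
}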
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index bfe11be81f6f..8af2804bab16 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -21,6 +21,7 @@
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/timer.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
#include <asm/signal.h>
#include <linux/path.h>
#include <net/ipv6.h>
@@ -32,6 +33,7 @@ struct rpc_inode;
*/
struct rpc_clnt {
atomic_t cl_count; /* Number of references */
+ unsigned int cl_clid; /* client id */
struct list_head cl_clients; /* Global list of clients */
struct list_head cl_tasks; /* List of tasks */
spinlock_t cl_lock; /* spinlock */
@@ -41,13 +43,13 @@ struct rpc_clnt {
cl_vers, /* RPC version number */
cl_maxproc; /* max procedure number */
- const char * cl_protname; /* protocol name */
struct rpc_auth * cl_auth; /* authenticator */
struct rpc_stat * cl_stats; /* per-program statistics */
struct rpc_iostats * cl_metrics; /* per-client statistics */
unsigned int cl_softrtry : 1,/* soft timeouts */
cl_discrtry : 1,/* disconnect before retry */
+ cl_noretranstimeo: 1,/* No retransmit timeouts */
cl_autobind : 1,/* use getport() */
cl_chatty : 1;/* be verbose */
@@ -56,12 +58,11 @@ struct rpc_clnt {
int cl_nodelen; /* nodename length */
char cl_nodename[UNX_MAXNODENAME];
- struct dentry * cl_dentry;
+ struct rpc_pipe_dir_head cl_pipedir_objects;
struct rpc_clnt * cl_parent; /* Points to parent of clones */
struct rpc_rtt cl_rtt_default;
struct rpc_timeout cl_timeout_default;
const struct rpc_program *cl_program;
- char *cl_principal; /* target to authenticate to */
};
/*
@@ -126,6 +127,7 @@ struct rpc_create_args {
#define RPC_CLNT_CREATE_QUIET (1UL << 6)
#define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7)
#define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT (1UL << 8)
+#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
@@ -134,6 +136,10 @@ void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt *,
rpc_authflavor_t);
+int rpc_switch_client_transport(struct rpc_clnt *,
+ struct xprt_create *,
+ const struct rpc_timeout *);
+
void rpc_shutdown_client(struct rpc_clnt *);
void rpc_release_client(struct rpc_clnt *);
void rpc_task_release_client(struct rpc_task *);
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index aa5b582cc471..a353e0300b54 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -5,6 +5,26 @@
#include <linux/workqueue.h>
+struct rpc_pipe_dir_head {
+ struct list_head pdh_entries;
+ struct dentry *pdh_dentry;
+};
+
+struct rpc_pipe_dir_object_ops;
+struct rpc_pipe_dir_object {
+ struct list_head pdo_head;
+ const struct rpc_pipe_dir_object_ops *pdo_ops;
+
+ void *pdo_data;
+};
+
+struct rpc_pipe_dir_object_ops {
+ int (*create)(struct dentry *dir,
+ struct rpc_pipe_dir_object *pdo);
+ void (*destroy)(struct dentry *dir,
+ struct rpc_pipe_dir_object *pdo);
+};
+
struct rpc_pipe_msg {
struct list_head list;
void *data;
@@ -74,7 +94,24 @@ extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *);
struct rpc_clnt;
extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *);
-extern int rpc_remove_client_dir(struct dentry *);
+extern int rpc_remove_client_dir(struct rpc_clnt *);
+
+extern void rpc_init_pipe_dir_head(struct rpc_pipe_dir_head *pdh);
+extern void rpc_init_pipe_dir_object(struct rpc_pipe_dir_object *pdo,
+ const struct rpc_pipe_dir_object_ops *pdo_ops,
+ void *pdo_data);
+extern int rpc_add_pipe_dir_object(struct net *net,
+ struct rpc_pipe_dir_head *pdh,
+ struct rpc_pipe_dir_object *pdo);
+extern void rpc_remove_pipe_dir_object(struct net *net,
+ struct rpc_pipe_dir_head *pdh,
+ struct rpc_pipe_dir_object *pdo);
+extern struct rpc_pipe_dir_object *rpc_find_or_alloc_pipe_dir_object(
+ struct net *net,
+ struct rpc_pipe_dir_head *pdh,
+ int (*match)(struct rpc_pipe_dir_object *, void *),
+ struct rpc_pipe_dir_object *(*alloc)(void *),
+ void *data);
struct cache_detail;
extern struct dentry *rpc_create_cache_dir(struct dentry *,
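For illustration only: a minimal sketch of how a pipefs user might attach one of the new directory objects to a client's cl_pipedir_objects list. The example_pdo_* names are invented; the ops structure and registration calls are the ones declared above.

/* Sketch only: attaching a directory object to a client's pipefs dir. */
static int example_pdo_create(struct dentry *dir, struct rpc_pipe_dir_object *pdo)
{
	/* create pipes/files under 'dir', using pdo->pdo_data as context */
	return 0;
}

static void example_pdo_destroy(struct dentry *dir, struct rpc_pipe_dir_object *pdo)
{
	/* tear down whatever example_pdo_create() made */
}

static const struct rpc_pipe_dir_object_ops example_pdo_ops = {
	.create		= example_pdo_create,
	.destroy	= example_pdo_destroy,
};

static int example_attach_pdo(struct net *net, struct rpc_clnt *clnt,
			      struct rpc_pipe_dir_object *pdo, void *data)
{
	rpc_init_pipe_dir_object(pdo, &example_pdo_ops, data);
	return rpc_add_pipe_dir_object(net, &clnt->cl_pipedir_objects, pdo);
}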
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 1821445708d6..3a847de83fab 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -79,7 +79,7 @@ struct rpc_task {
unsigned short tk_flags; /* misc flags */
unsigned short tk_timeouts; /* maj timeouts */
-#ifdef RPC_DEBUG
+#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS)
unsigned short tk_pid; /* debugging aid */
#endif
unsigned char tk_priority : 2,/* Task priority */
@@ -122,6 +122,7 @@ struct rpc_task_setup {
#define RPC_TASK_SENT 0x0800 /* message was sent */
#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
+#define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 1f0216b9a6c9..6eecfc2e4f98 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -243,7 +243,6 @@ struct svc_rqst {
struct xdr_buf rq_res;
struct page * rq_pages[RPCSVC_MAXPAGES];
struct page * *rq_respages; /* points into rq_pages */
- int rq_resused; /* number of pages used for result */
struct page * *rq_next_page; /* next reply page to use */
struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index cec7b9b5e1bf..8097b9df6773 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -288,7 +288,7 @@ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
-int xprt_prepare_transmit(struct rpc_task *task);
+bool xprt_prepare_transmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
void xprt_end_transmit(struct rpc_task *task);
int xprt_adjust_timeout(struct rpc_rqst *req);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d95cde5e257d..46ba0c6c219f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -182,6 +182,33 @@ enum {
#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
/*
+ * We use this to track usage of a cluster. A cluster is a block of swap disk
+ * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All
+ * free clusters are organized into a list. We fetch an entry from the list to
+ * get a free cluster.
+ *
+ * The data field stores the next cluster index if the cluster is free, or the
+ * cluster usage counter otherwise. The flags field determines if a cluster is free. This is
+ * protected by swap_info_struct.lock.
+ */
+struct swap_cluster_info {
+ unsigned int data:24;
+ unsigned int flags:8;
+};
+#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
+#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
+
+/*
+ * We assign a cluster to each CPU, so each CPU can allocate swap entries from
+ * its own cluster and swap out sequentially. The purpose is to optimize swapout
+ * throughput.
+ */
+struct percpu_cluster {
+ struct swap_cluster_info index; /* Current cluster index */
+ unsigned int next; /* Likely next allocation offset */
+};
+
+/*
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
@@ -191,14 +218,16 @@ struct swap_info_struct {
signed char next; /* next type on the swap list */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
+ struct swap_cluster_info free_cluster_head; /* free cluster list head */
+ struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
unsigned int lowest_bit; /* index of first free in swap_map */
unsigned int highest_bit; /* index of last free in swap_map */
unsigned int pages; /* total of usable pages of swap */
unsigned int inuse_pages; /* number of those currently in use */
unsigned int cluster_next; /* likely index for next allocation */
unsigned int cluster_nr; /* countdown to next cluster search */
- unsigned int lowest_alloc; /* while preparing discard cluster */
- unsigned int highest_alloc; /* while preparing discard cluster */
+ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
struct swap_extent *curr_swap_extent;
struct swap_extent first_swap_extent;
struct block_device *bdev; /* swap device or bdev of swap file */
@@ -212,14 +241,18 @@ struct swap_info_struct {
* protect map scan related fields like
* swap_map, lowest_bit, highest_bit,
* inuse_pages, cluster_next,
- * cluster_nr, lowest_alloc and
- * highest_alloc. other fields are only
- * changed at swapon/swapoff, so are
- * protected by swap_lock. changing
- * flags need hold this lock and
- * swap_lock. If both locks need hold,
- * hold swap_lock first.
+ * cluster_nr, lowest_alloc,
+ * highest_alloc, free/discard cluster
+ * list. other fields are only changed
+ * at swapon/swapoff, so are protected
+ * by swap_lock. changing flags need
+ * hold this lock and swap_lock. If
+ * both locks need hold, hold swap_lock
+ * first.
*/
+ struct work_struct discard_work; /* discard worker */
+ struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
+ struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
};
struct swap_list_t {
@@ -247,7 +280,7 @@ extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
-extern int lru_add_drain_all(void);
+extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);
@@ -414,6 +447,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
#else /* CONFIG_SWAP */
+#define swap_address_space(entry) (NULL)
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
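For illustration only: a sketch of how the free-cluster list described above can be walked, treating each free cluster's 24-bit data field as the index of the next free cluster. The example_* helper is invented; the real list bookkeeping (head/tail updates, locking) lives in mm/swapfile.c.

/* Sketch only: counting free clusters; caller must hold si->lock. */
static unsigned int example_count_free_clusters(struct swap_info_struct *si)
{
	unsigned int idx, nr = 0;

	/* An empty list is marked with CLUSTER_FLAG_NEXT_NULL on the head. */
	if (si->free_cluster_head.flags & CLUSTER_FLAG_NEXT_NULL)
		return 0;

	idx = si->free_cluster_head.data;
	for (;;) {
		struct swap_cluster_info *ci = &si->cluster_info[idx];

		nr++;
		if (ci->flags & CLUSTER_FLAG_NEXT_NULL)
			break;
		idx = ci->data;		/* next free cluster index */
	}
	return nr;
}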
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 8d4fa82bfb91..c0f75261a728 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -139,7 +139,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
-extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma,
+ struct mm_struct *mm, pte_t *pte);
#else
#define make_migration_entry(page, write) swp_entry(0, 0)
@@ -151,8 +152,8 @@ static inline int is_migration_entry(swp_entry_t swp)
static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
-static inline void migration_entry_wait_huge(struct mm_struct *mm,
- pte_t *pte) { }
+static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
+ struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
return 0;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 84662ecc7b51..94273bbe6050 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -120,7 +120,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.class = &event_class_syscall_enter, \
.event.funcs = &enter_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
- .flags = TRACE_EVENT_FL_CAP_ANY, \
+ .flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) \
@@ -134,7 +134,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.class = &event_class_syscall_exit, \
.event.funcs = &exit_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
- .flags = TRACE_EVENT_FL_CAP_ANY, \
+ .flags = TRACE_EVENT_FL_CAP_ANY, \
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) \
@@ -184,8 +184,10 @@ extern struct trace_event_functions exit_syscall_print_funcs;
#define __PROTECT(...) asmlinkage_protect(__VA_ARGS__)
#define __SYSCALL_DEFINEx(x, name, ...) \
- asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
+ __attribute__((alias(__stringify(SyS##name)))); \
static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
{ \
long ret = SYSC##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
@@ -193,7 +195,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
return ret; \
} \
- SYSCALL_ALIAS(sys##name, SyS##name); \
static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
asmlinkage long sys_time(time_t __user *tloc);
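For illustration only: roughly what the reworked __SYSCALL_DEFINEx() now produces for a one-argument syscall. This is a hand-written approximation of the expansion (metadata and tracing glue omitted), not generated output; the point is that sys_close becomes a plain alias of the sign-extending SyS_close stub instead of going through SYSCALL_ALIAS().

/* Sketch only: approximate expansion of SYSCALL_DEFINE1(close, unsigned int, fd). */
asmlinkage long sys_close(unsigned int fd)
	__attribute__((alias("SyS_close")));		/* alias, no separate body */
static inline long SYSC_close(unsigned int fd);
asmlinkage long SyS_close(long fd);			/* takes a sign-extended long */
asmlinkage long SyS_close(long fd)
{
	long ret = SYSC_close((unsigned int)fd);	/* cast back to the real type */
	asmlinkage_protect(1, ret, fd);
	return ret;
}
static inline long SYSC_close(unsigned int fd)
{
	/* the original function body written by the caller goes here */
}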
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 11baec7c9b26..6695040a0317 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -173,7 +173,6 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
struct sysfs_ops {
ssize_t (*show)(struct kobject *, struct attribute *, char *);
ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
- const void *(*namespace)(struct kobject *, const struct attribute *);
};
struct sysfs_dirent;
@@ -183,19 +182,23 @@ struct sysfs_dirent;
int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
void *data, struct module *owner);
-int __must_check sysfs_create_dir(struct kobject *kobj);
+int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns);
void sysfs_remove_dir(struct kobject *kobj);
-int __must_check sysfs_rename_dir(struct kobject *kobj, const char *new_name);
-int __must_check sysfs_move_dir(struct kobject *kobj,
- struct kobject *new_parent_kobj);
-
-int __must_check sysfs_create_file(struct kobject *kobj,
- const struct attribute *attr);
+int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
+ const void *new_ns);
+int __must_check sysfs_move_dir_ns(struct kobject *kobj,
+ struct kobject *new_parent_kobj,
+ const void *new_ns);
+
+int __must_check sysfs_create_file_ns(struct kobject *kobj,
+ const struct attribute *attr,
+ const void *ns);
int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute **attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
-void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr);
+void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
+ const void *ns);
void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
int __must_check sysfs_create_bin_file(struct kobject *kobj,
@@ -210,8 +213,9 @@ int __must_check sysfs_create_link_nowarn(struct kobject *kobj,
const char *name);
void sysfs_remove_link(struct kobject *kobj, const char *name);
-int sysfs_rename_link(struct kobject *kobj, struct kobject *target,
- const char *old_name, const char *new_name);
+int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *target,
+ const char *old_name, const char *new_name,
+ const void *new_ns);
void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
const char *name);
@@ -241,9 +245,9 @@ void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
void sysfs_notify_dirent(struct sysfs_dirent *sd);
-struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
- const void *ns,
- const unsigned char *name);
+struct sysfs_dirent *sysfs_get_dirent_ns(struct sysfs_dirent *parent_sd,
+ const unsigned char *name,
+ const void *ns);
struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
void sysfs_put(struct sysfs_dirent *sd);
@@ -257,7 +261,7 @@ static inline int sysfs_schedule_callback(struct kobject *kobj,
return -ENOSYS;
}
-static inline int sysfs_create_dir(struct kobject *kobj)
+static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
{
return 0;
}
@@ -266,19 +270,22 @@ static inline void sysfs_remove_dir(struct kobject *kobj)
{
}
-static inline int sysfs_rename_dir(struct kobject *kobj, const char *new_name)
+static inline int sysfs_rename_dir_ns(struct kobject *kobj,
+ const char *new_name, const void *new_ns)
{
return 0;
}
-static inline int sysfs_move_dir(struct kobject *kobj,
- struct kobject *new_parent_kobj)
+static inline int sysfs_move_dir_ns(struct kobject *kobj,
+ struct kobject *new_parent_kobj,
+ const void *new_ns)
{
return 0;
}
-static inline int sysfs_create_file(struct kobject *kobj,
- const struct attribute *attr)
+static inline int sysfs_create_file_ns(struct kobject *kobj,
+ const struct attribute *attr,
+ const void *ns)
{
return 0;
}
@@ -295,8 +302,9 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
return 0;
}
-static inline void sysfs_remove_file(struct kobject *kobj,
- const struct attribute *attr)
+static inline void sysfs_remove_file_ns(struct kobject *kobj,
+ const struct attribute *attr,
+ const void *ns)
{
}
@@ -333,8 +341,9 @@ static inline void sysfs_remove_link(struct kobject *kobj, const char *name)
{
}
-static inline int sysfs_rename_link(struct kobject *k, struct kobject *t,
- const char *old_name, const char *new_name)
+static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t,
+ const char *old_name,
+ const char *new_name, const void *ns)
{
return 0;
}
@@ -413,10 +422,9 @@ static inline void sysfs_notify(struct kobject *kobj, const char *dir,
static inline void sysfs_notify_dirent(struct sysfs_dirent *sd)
{
}
-static inline
-struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
- const void *ns,
- const unsigned char *name)
+static inline struct sysfs_dirent *
+sysfs_get_dirent_ns(struct sysfs_dirent *parent_sd, const unsigned char *name,
+ const void *ns)
{
return NULL;
}
@@ -435,4 +443,28 @@ static inline int __must_check sysfs_init(void)
#endif /* CONFIG_SYSFS */
+static inline int __must_check sysfs_create_file(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return sysfs_create_file_ns(kobj, attr, NULL);
+}
+
+static inline void sysfs_remove_file(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return sysfs_remove_file_ns(kobj, attr, NULL);
+}
+
+static inline int sysfs_rename_link(struct kobject *kobj, struct kobject *target,
+ const char *old_name, const char *new_name)
+{
+ return sysfs_rename_link_ns(kobj, target, old_name, new_name, NULL);
+}
+
+static inline struct sysfs_dirent *
+sysfs_get_dirent(struct sysfs_dirent *parent_sd, const unsigned char *name)
+{
+ return sysfs_get_dirent_ns(parent_sd, name, NULL);
+}
+
#endif /* _SYSFS_H_ */
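For illustration only: the un-suffixed helpers above are now thin wrappers that pass a NULL namespace tag, so existing callers keep working while namespace-aware code passes its tag through the new *_ns variants. A sketch (the kobject, attribute and tag are hypothetical):

/* Sketch only: namespace-unaware vs. namespace-tagged sysfs file creation. */
static int example_create_attrs(struct kobject *kobj,
				const struct attribute *attr,
				const void *net_ns_tag)
{
	int error;

	/* Old-style callers are unchanged; a NULL tag is used under the hood. */
	error = sysfs_create_file(kobj, attr);
	if (error)
		return error;

	/* Namespace-aware callers pass their tag explicitly. */
	return sysfs_create_file_ns(kobj, attr, net_ns_tag);
}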
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 7faf933cced7..387fa7d05c98 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -17,9 +17,6 @@
#include <linux/errno.h>
#include <linux/types.h>
-/* Enable/disable SYSRQ support by default (0==no, 1==yes). */
-#define SYSRQ_DEFAULT_ENABLE 1
-
/* Possible values of bitmask for enabling sysrq functions */
/* 0x0001 is reserved for enable everything */
#define SYSRQ_ENABLE_LOG 0x0002
diff --git a/include/linux/tc_act/tc_defact.h b/include/linux/tc_act/tc_defact.h
deleted file mode 100644
index 6f65d07c7ce2..000000000000
--- a/include/linux/tc_act/tc_defact.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __LINUX_TC_DEF_H
-#define __LINUX_TC_DEF_H
-
-#include <linux/pkt_cls.h>
-
-struct tc_defact {
- tc_gen;
-};
-
-enum {
- TCA_DEF_UNSPEC,
- TCA_DEF_TM,
- TCA_DEF_PARMS,
- TCA_DEF_DATA,
- __TCA_DEF_MAX
-};
-#define TCA_DEF_MAX (__TCA_DEF_MAX - 1)
-
-#endif
diff --git a/include/linux/tegra-cpuidle.h b/include/linux/tegra-cpuidle.h
new file mode 100644
index 000000000000..9c6286bbf662
--- /dev/null
+++ b/include/linux/tegra-cpuidle.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_TEGRA_CPUIDLE_H__
+#define __LINUX_TEGRA_CPUIDLE_H__
+
+#ifdef CONFIG_CPU_IDLE
+void tegra_cpuidle_pcie_irqs_in_use(void);
+#else
+static inline void tegra_cpuidle_pcie_irqs_in_use(void)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/tegra-powergate.h b/include/linux/tegra-powergate.h
index 55c29a8d5015..fd4498329c7c 100644
--- a/include/linux/tegra-powergate.h
+++ b/include/linux/tegra-powergate.h
@@ -34,10 +34,18 @@ struct clk;
#define TEGRA_POWERGATE_CPU3 11
#define TEGRA_POWERGATE_CELP 12
#define TEGRA_POWERGATE_3D1 13
+#define TEGRA_POWERGATE_CPU0 14
+#define TEGRA_POWERGATE_C0NC 15
+#define TEGRA_POWERGATE_C1NC 16
+#define TEGRA_POWERGATE_DIS 18
+#define TEGRA_POWERGATE_DISB 19
+#define TEGRA_POWERGATE_XUSBA 20
+#define TEGRA_POWERGATE_XUSBB 21
+#define TEGRA_POWERGATE_XUSBC 22
-#define TEGRA_POWERGATE_CPU0 TEGRA_POWERGATE_CPU
#define TEGRA_POWERGATE_3D0 TEGRA_POWERGATE_3D
+#ifdef CONFIG_ARCH_TEGRA
int tegra_powergate_is_powered(int id);
int tegra_powergate_power_on(int id);
int tegra_powergate_power_off(int id);
@@ -45,5 +53,31 @@ int tegra_powergate_remove_clamping(int id);
/* Must be called with clk disabled, and returns with clk enabled */
int tegra_powergate_sequence_power_up(int id, struct clk *clk);
+#else
+static inline int tegra_powergate_is_powered(int id)
+{
+ return -ENOSYS;
+}
+
+static inline int tegra_powergate_power_on(int id)
+{
+ return -ENOSYS;
+}
+
+static inline int tegra_powergate_power_off(int id)
+{
+ return -ENOSYS;
+}
+
+static inline int tegra_powergate_remove_clamping(int id)
+{
+ return -ENOSYS;
+}
+
+static inline int tegra_powergate_sequence_power_up(int id, struct clk *clk)
+{
+ return -ENOSYS;
+}
+#endif
#endif /* _MACH_TEGRA_POWERGATE_H_ */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index a386a1cbb6e1..b268d3cf7ae3 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -207,6 +207,16 @@ struct thermal_bind_params {
* See Documentation/thermal/sysfs-api.txt for more information.
*/
int trip_mask;
+
+ /*
+ * This is an array of cooling state limits. Must have exactly
+ * 2 * thermal_zone.number_of_trip_points entries. It is an array consisting
+ * of tuples <lower-state upper-state> of state limits. Each trip
+ * will be associated with one state limit tuple when binding.
+ * A NULL pointer means <THERMAL_NO_LIMITS THERMAL_NO_LIMITS>
+ * on all trips.
+ */
+ unsigned long *binding_limits;
int (*match) (struct thermal_zone_device *tz,
struct thermal_cooling_device *cdev);
};
@@ -214,6 +224,14 @@ struct thermal_bind_params {
/* Structure to define Thermal Zone parameters */
struct thermal_zone_params {
char governor_name[THERMAL_NAME_LENGTH];
+
+ /*
+ * A boolean to indicate whether the thermal-to-hwmon sysfs interface
+ * is required. When no_hwmon == false, a hwmon sysfs interface
+ * will be created; when no_hwmon == true, nothing will be done.
+ */
+ bool no_hwmon;
+
int num_tbps; /* Number of tbp entries */
struct thermal_bind_params *tbp;
};
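For illustration only: a sketch of what a binding_limits table could look like for a hypothetical zone with two trip points, following the comment above (one <lower-state upper-state> pair per trip). The example_* names and the concrete state numbers are made up.

/* Sketch only: state limits for a hypothetical two-trip thermal zone. */
static unsigned long example_binding_limits[] = {
	/* trip 0: restrict the cooling device to states 0..1 */
	0, 1,
	/* trip 1: allow states 2..3 */
	2, 3,
};

static struct thermal_bind_params example_tbp = {
	.binding_limits	= example_binding_limits,
	.trip_mask	= 0x03,		/* bind to both trips */
};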
diff --git a/include/linux/thinkpad_acpi.h b/include/linux/thinkpad_acpi.h
new file mode 100644
index 000000000000..361de59a2285
--- /dev/null
+++ b/include/linux/thinkpad_acpi.h
@@ -0,0 +1,15 @@
+#ifndef __THINKPAD_ACPI_H__
+#define __THINKPAD_ACPI_H__
+
+/* These functions return 0 on success, or a negative error code
+ (e.g. -ENODEV if no LED is present) */
+
+enum {
+ TPACPI_LED_MUTE,
+ TPACPI_LED_MICMUTE,
+ TPACPI_LED_MAX,
+};
+
+int tpacpi_led_set(int whichled, bool on);
+
+#endif
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index e7e04736802f..fddbe2023a5d 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -104,8 +104,21 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
-#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
-#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
+static inline __deprecated void set_need_resched(void)
+{
+ /*
+ * Use of this function is deprecated.
+ *
+ * As of this writing there are only a few users in the DRM tree left
+ * all of which are wrong and can be removed without causing too much
+ * grief.
+ *
+ * The DRM people are aware and are working on removing the last few
+ * instances.
+ */
+}
+
+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
/*
diff --git a/include/linux/time-armada-370-xp.h b/include/linux/time-armada-370-xp.h
deleted file mode 100644
index dfdfdc03115b..000000000000
--- a/include/linux/time-armada-370-xp.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Marvell Armada 370/XP SoC timer handling.
- *
- * Copyright (C) 2012 Marvell
- *
- * Lior Amsalem <alior@marvell.com>
- * Gregory CLEMENT <gregory.clement@free-electrons.com>
- * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- *
- */
-#ifndef __TIME_ARMADA_370_XPPRCMU_H
-#define __TIME_ARMADA_370_XPPRCMU_H
-
-#include <linux/init.h>
-
-void __init armada_370_xp_timer_init(void);
-
-#endif
diff --git a/include/linux/timex.h b/include/linux/timex.h
index b3726e61368e..9d3f1a5b6178 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -64,6 +64,20 @@
#include <asm/timex.h>
+#ifndef random_get_entropy
+/*
+ * The random_get_entropy() function is used by the /dev/random driver
+ * in order to extract entropy via the relative unpredictability of
+ * when an interrupt takes place versus a high speed, fine-grained
+ * timing source or cycle counter. Since it will be called on every
+ * single interrupt, it must have a very low cost/overhead.
+ *
+ * By default we use get_cycles() for this purpose, but individual
+ * architectures may override this in their asm/timex.h header file.
+ */
+#define random_get_entropy() get_cycles()
+#endif
+
/*
* SHIFT_PLL is used as a dampening factor to define how much we
* adjust the frequency correction for a given offset in PLL mode.
@@ -141,6 +155,7 @@ extern int do_adjtimex(struct timex *);
extern void hardpps(const struct timespec *, const struct timespec *);
int read_current_timer(unsigned long *timer_val);
+void ntp_notify_cmos_timer(void);
/* The clock frequency of the i8253/i8254 PIT */
#define PIT_TICK_RATE 1193182ul
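For illustration only: the comment above notes that an architecture can override random_get_entropy() in its asm/timex.h. A hypothetical override for a platform whose get_cycles() is useless but which has a cheap free-running timer might look like this (example_timer_base and EXAMPLE_TIMER_COUNT are invented names):

/* Sketch only: an asm/timex.h override on an arch without a cycle counter. */
extern void __iomem *example_timer_base;	/* hypothetical ioremapped timer */

static inline unsigned long example_read_fast_timer(void)
{
	return readl_relaxed(example_timer_base + EXAMPLE_TIMER_COUNT);
}

#define random_get_entropy()	example_read_fast_timer()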
diff --git a/include/linux/topology.h b/include/linux/topology.h
index d3cf0d6e7712..12ae6ce997d6 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -106,6 +106,8 @@ int arch_update_cpu_topology(void);
.last_balance = jiffies, \
.balance_interval = 1, \
.smt_gain = 1178, /* 15% */ \
+ .max_newidle_lb_cost = 0, \
+ .next_decay_max_lb_cost = jiffies, \
}
#endif
#endif /* CONFIG_SCHED_SMT */
@@ -135,6 +137,8 @@ int arch_update_cpu_topology(void);
, \
.last_balance = jiffies, \
.balance_interval = 1, \
+ .max_newidle_lb_cost = 0, \
+ .next_decay_max_lb_cost = jiffies, \
}
#endif
#endif /* CONFIG_SCHED_MC */
@@ -166,6 +170,8 @@ int arch_update_cpu_topology(void);
, \
.last_balance = jiffies, \
.balance_interval = 1, \
+ .max_newidle_lb_cost = 0, \
+ .next_decay_max_lb_cost = jiffies, \
}
#endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index f0c3e4cb45a8..accc497f8d72 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -267,6 +267,8 @@ static inline void tracepoint_synchronize_unregister(void)
#define TRACE_EVENT_FLAGS(event, flag)
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
#endif /* DECLARE_TRACE */
#ifndef TRACE_EVENT
@@ -399,4 +401,6 @@ static inline void tracepoint_synchronize_unregister(void)
#define TRACE_EVENT_FLAGS(event, flag)
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
#endif /* ifdef TRACE_EVENT (see note above) */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 64f864651d86..97d660ed70c1 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -180,7 +180,6 @@ struct tty_port_operations {
IFF the port was initialized. Do not use to free resources. Called
under the port mutex to serialize against activate/shutdowns */
void (*shutdown)(struct tty_port *port);
- void (*drop)(struct tty_port *port);
/* Called under the port mutex from tty_port_open, serialized using
the port mutex */
/* FIXME: long term getting the tty argument *out* of this would be
@@ -672,31 +671,17 @@ static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
#define wait_event_interruptible_tty(tty, wq, condition) \
({ \
int __ret = 0; \
- if (!(condition)) { \
- __wait_event_interruptible_tty(tty, wq, condition, __ret); \
- } \
+ if (!(condition)) \
+ __ret = __wait_event_interruptible_tty(tty, wq, \
+ condition); \
__ret; \
})
-#define __wait_event_interruptible_tty(tty, wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (!signal_pending(current)) { \
- tty_unlock(tty); \
+#define __wait_event_interruptible_tty(tty, wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ tty_unlock(tty); \
schedule(); \
- tty_lock(tty); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
+ tty_lock(tty))
#ifdef CONFIG_PROC_FS
extern void proc_tty_register_driver(struct tty_driver *);
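For illustration only: the rewritten macro now delegates to the common ___wait_event() helper, passing tty_unlock()/schedule()/tty_lock() as the command so the tty lock is dropped around the sleep. Caller usage is unchanged; a sketch (the surrounding function is hypothetical):

/* Sketch only: sleeping for a condition while holding the tty lock. */
static int example_wait_for_carrier(struct tty_struct *tty, struct tty_port *port)
{
	/* The tty lock is held on entry and is dropped while we sleep. */
	return wait_event_interruptible_tty(tty, port->open_wait,
					    tty_port_carrier_raised(port));
}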
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 8da8c4e87da3..7bfabd20204c 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -67,6 +67,13 @@ struct u64_stats_sync {
#endif
};
+
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+# define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
+#else
+# define u64_stats_init(syncp) do { } while (0)
+#endif
+
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
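For illustration only: with the new u64_stats_init(), code that embeds a u64_stats_sync is expected to initialise it explicitly; the macro expands to a seqcount init on 32-bit SMP and to a no-op elsewhere. A sketch with a hypothetical per-CPU stats structure:

/* Sketch only: initialising per-CPU stats that embed a u64_stats_sync. */
struct example_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static void example_stats_init(struct example_pcpu_stats __percpu *stats)
{
	int cpu;

	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);
}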
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 5ca0951e1855..9d8cf056e661 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -15,7 +15,7 @@
*/
static inline void pagefault_disable(void)
{
- inc_preempt_count();
+ preempt_count_inc();
/*
* make sure to have issued the store before a pagefault
* can hit.
@@ -30,11 +30,7 @@ static inline void pagefault_enable(void)
* the pagefault handler again.
*/
barrier();
- dec_preempt_count();
- /*
- * make sure we do..
- */
- barrier();
+ preempt_count_dec();
preempt_check_resched();
}
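For illustration only: pagefault_disable()/pagefault_enable() now go through the preempt_count_inc()/preempt_count_dec() helpers, but the usual pattern around a non-faulting user access is unchanged. A sketch with hypothetical names:

/* Sketch only: a non-faulting probe of a user address. */
static long example_probe_user_word(unsigned long __user *uaddr, unsigned long *val)
{
	long ret;

	pagefault_disable();		/* faults now fail instead of sleeping */
	ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}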
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 06f28beed7c2..319eae70fe84 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -30,6 +30,7 @@
struct vm_area_struct;
struct mm_struct;
struct inode;
+struct notifier_block;
#ifdef CONFIG_ARCH_SUPPORTS_UPROBES
# include <asm/uprobes.h>
@@ -108,6 +109,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
+extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
@@ -117,14 +119,21 @@ extern void uprobe_start_dup_mmap(void);
extern void uprobe_end_dup_mmap(void);
extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
extern void uprobe_free_utask(struct task_struct *t);
-extern void uprobe_copy_process(struct task_struct *t);
+extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
extern void uprobe_notify_resume(struct pt_regs *regs);
extern bool uprobe_deny_signal(void);
-extern bool __weak arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
extern void uprobe_clear_state(struct mm_struct *mm);
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
+extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
+extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
#else /* !CONFIG_UPROBES */
struct uprobes_state {
};
@@ -174,7 +183,7 @@ static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
static inline void uprobe_free_utask(struct task_struct *t)
{
}
-static inline void uprobe_copy_process(struct task_struct *t)
+static inline void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
}
static inline void uprobe_clear_state(struct mm_struct *mm)
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 001629cd1a97..512ab162832c 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -475,7 +475,8 @@ struct usb3_lpm_parameters {
* @lpm_capable: device supports LPM
* @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
* @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM
- * @usb2_hw_lpm_enabled: USB2 hardware LPM enabled
+ * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
+ * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled
* @usb3_lpm_enabled: USB3 hardware LPM enabled
* @string_langid: language ID for strings
* @product: iProduct string, if present (static)
@@ -548,6 +549,7 @@ struct usb_device {
unsigned usb2_hw_lpm_capable:1;
unsigned usb2_hw_lpm_besl_capable:1;
unsigned usb2_hw_lpm_enabled:1;
+ unsigned usb2_hw_lpm_allowed:1;
unsigned usb3_lpm_enabled:1;
int string_langid;
@@ -702,7 +704,7 @@ extern int usb_alloc_streams(struct usb_interface *interface,
unsigned int num_streams, gfp_t mem_flags);
/* Reverts a group of bulk endpoints back to not using stream IDs. */
-extern void usb_free_streams(struct usb_interface *interface,
+extern int usb_free_streams(struct usb_interface *interface,
struct usb_host_endpoint **eps, unsigned int num_eps,
gfp_t mem_flags);
@@ -1209,11 +1211,13 @@ struct usb_anchor {
struct list_head urb_list;
wait_queue_head_t wait;
spinlock_t lock;
+ atomic_t suspend_wakeups;
unsigned int poisoned:1;
};
static inline void init_usb_anchor(struct usb_anchor *anchor)
{
+ memset(anchor, 0, sizeof(*anchor));
INIT_LIST_HEAD(&anchor->urb_list);
init_waitqueue_head(&anchor->wait);
spin_lock_init(&anchor->lock);
@@ -1260,6 +1264,8 @@ typedef void (*usb_complete_t)(struct urb *);
* @sg: scatter gather buffer list, the buffer size of each element in
* the list (except the last) must be divisible by the endpoint's
* max packet size if no_sg_constraint isn't set in 'struct usb_bus'
+ * (FIXME: scatter-gather under xHCI is broken for periodic transfers.
+ * Do not use urb->sg for interrupt endpoints for now, only bulk.)
* @num_mapped_sgs: (internal) number of mapped sg entries
* @num_sgs: number of entries in the sg list
* @transfer_buffer_length: How big is transfer_buffer. The transfer may
@@ -1574,6 +1580,8 @@ extern void usb_kill_anchored_urbs(struct usb_anchor *anchor);
extern void usb_poison_anchored_urbs(struct usb_anchor *anchor);
extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor);
extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_anchor_suspend_wakeups(struct usb_anchor *anchor);
+extern void usb_anchor_resume_wakeups(struct usb_anchor *anchor);
extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor);
extern void usb_unanchor_urb(struct urb *urb);
extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index cc25b70af33c..c3fa80745996 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -36,6 +36,9 @@
* SUCH DAMAGE.
*/
+#ifndef __LINUX_USB_CDC_NCM_H
+#define __LINUX_USB_CDC_NCM_H
+
#define CDC_NCM_COMM_ALTSETTING_NCM 0
#define CDC_NCM_COMM_ALTSETTING_MBIM 1
@@ -85,22 +88,13 @@
#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
struct cdc_ncm_ctx {
- struct usb_cdc_ncm_ntb_parameters ncm_parm;
struct hrtimer tx_timer;
struct tasklet_struct bh;
const struct usb_cdc_ncm_desc *func_desc;
- const struct usb_cdc_mbim_desc *mbim_desc;
- const struct usb_cdc_header_desc *header_desc;
- const struct usb_cdc_union_desc *union_desc;
+ const struct usb_cdc_mbim_desc *mbim_desc;
const struct usb_cdc_ether_desc *ether_desc;
- struct net_device *netdev;
- struct usb_device *udev;
- struct usb_host_endpoint *in_ep;
- struct usb_host_endpoint *out_ep;
- struct usb_host_endpoint *status_ep;
- struct usb_interface *intf;
struct usb_interface *control;
struct usb_interface *data;
@@ -113,8 +107,6 @@ struct cdc_ncm_ctx {
u32 tx_timer_pending;
u32 tx_curr_frame_num;
- u32 rx_speed;
- u32 tx_speed;
u32 rx_max;
u32 tx_max;
u32 max_datagram_size;
@@ -127,9 +119,14 @@ struct cdc_ncm_ctx {
u16 connected;
};
-extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
-extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
-extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
-extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign);
-extern int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
-extern int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
+u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
+void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
+struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
+int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
+int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
+struct sk_buff *
+cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags);
+int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in);
+
+#endif /* __LINUX_USB_CDC_NCM_H */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 75efc45eaa2f..b8aba196f7f1 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -73,6 +73,7 @@ struct giveback_urb_bh {
spinlock_t lock;
struct list_head head;
struct tasklet_struct bh;
+ struct usb_host_endpoint *completing_ep;
};
struct usb_hcd {
@@ -140,6 +141,7 @@ struct usb_hcd {
unsigned wireless:1; /* Wireless USB HCD */
unsigned authorized_default:1;
unsigned has_tt:1; /* Integrated TT in root hub */
+ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
unsigned int irq; /* irq allocated */
void __iomem *regs; /* device memory/io */
@@ -378,6 +380,12 @@ static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
return hcd->driver->flags & HCD_BH;
}
+static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ return hcd->high_prio_bh.completing_ep == ep;
+}
+
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
int status);
@@ -428,6 +436,8 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
+
#ifdef CONFIG_PM
extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif
@@ -496,6 +506,7 @@ struct usb_tt {
struct usb_device *hub; /* upstream highspeed hub */
int multi; /* true means one TT per port */
unsigned think_time; /* think time in ns */
+ void *hcpriv; /* HCD private data */
/* for control/bulk error recovery (CLEAR_TT_BUFFER) */
spinlock_t lock;
@@ -554,9 +565,8 @@ extern void usb_ep0_reinit(struct usb_device *);
* of (7/6 * 8 * bytecount) = 9.33 * bytecount */
/* bytecount = data payload byte count */
-#define NS_TO_US(ns) ((ns + 500L) / 1000L)
- /* convert & round nanoseconds to microseconds */
-
+#define NS_TO_US(ns) DIV_ROUND_UP(ns, 1000L)
+ /* convert nanoseconds to microseconds, rounding up */
/*
* Full/low speed bandwidth allocation constants/support.
diff --git a/include/linux/usb/intel_mid_otg.h b/include/linux/usb/intel_mid_otg.h
deleted file mode 100644
index 756cf5543ffd..000000000000
--- a/include/linux/usb/intel_mid_otg.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Intel MID (Langwell/Penwell) USB OTG Transceiver driver
- * Copyright (C) 2008 - 2010, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef __INTEL_MID_OTG_H
-#define __INTEL_MID_OTG_H
-
-#include <linux/pm.h>
-#include <linux/usb/otg.h>
-#include <linux/notifier.h>
-
-struct intel_mid_otg_xceiv;
-
-/* This is a common data structure for Intel MID platform to
- * save values of the OTG state machine */
-struct otg_hsm {
- /* Input */
- int a_bus_resume;
- int a_bus_suspend;
- int a_conn;
- int a_sess_vld;
- int a_srp_det;
- int a_vbus_vld;
- int b_bus_resume;
- int b_bus_suspend;
- int b_conn;
- int b_se0_srp;
- int b_ssend_srp;
- int b_sess_end;
- int b_sess_vld;
- int id;
-/* id values */
-#define ID_B 0x05
-#define ID_A 0x04
-#define ID_ACA_C 0x03
-#define ID_ACA_B 0x02
-#define ID_ACA_A 0x01
- int power_up;
- int adp_change;
- int test_device;
-
- /* Internal variables */
- int a_set_b_hnp_en;
- int b_srp_done;
- int b_hnp_enable;
- int hnp_poll_enable;
-
- /* Timeout indicator for timers */
- int a_wait_vrise_tmout;
- int a_wait_bcon_tmout;
- int a_aidl_bdis_tmout;
- int a_bidl_adis_tmout;
- int a_bidl_adis_tmr;
- int a_wait_vfall_tmout;
- int b_ase0_brst_tmout;
- int b_bus_suspend_tmout;
- int b_srp_init_tmout;
- int b_srp_fail_tmout;
- int b_srp_fail_tmr;
- int b_adp_sense_tmout;
-
- /* Informative variables */
- int a_bus_drop;
- int a_bus_req;
- int a_clr_err;
- int b_bus_req;
- int a_suspend_req;
- int b_bus_suspend_vld;
-
- /* Output */
- int drv_vbus;
- int loc_conn;
- int loc_sof;
-
- /* Others */
- int vbus_srp_up;
-};
-
-/* must provide ULPI access function to read/write registers implemented in
- * ULPI address space */
-struct iotg_ulpi_access_ops {
- int (*read)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 *val);
- int (*write)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 val);
-};
-
-#define OTG_A_DEVICE 0x0
-#define OTG_B_DEVICE 0x1
-
-/*
- * the Intel MID (Langwell/Penwell) otg transceiver driver needs to interact
- * with device and host drivers to implement the USB OTG related feature. More
- * function members are added based on usb_phy data structure for this
- * purpose.
- */
-struct intel_mid_otg_xceiv {
- struct usb_phy otg;
- struct otg_hsm hsm;
-
- /* base address */
- void __iomem *base;
-
- /* ops to access ulpi */
- struct iotg_ulpi_access_ops ulpi_ops;
-
- /* atomic notifier for interrupt context */
- struct atomic_notifier_head iotg_notifier;
-
- /* start/stop USB Host function */
- int (*start_host)(struct intel_mid_otg_xceiv *iotg);
- int (*stop_host)(struct intel_mid_otg_xceiv *iotg);
-
- /* start/stop USB Peripheral function */
- int (*start_peripheral)(struct intel_mid_otg_xceiv *iotg);
- int (*stop_peripheral)(struct intel_mid_otg_xceiv *iotg);
-
- /* start/stop ADP sense/probe function */
- int (*set_adp_probe)(struct intel_mid_otg_xceiv *iotg,
- bool enabled, int dev);
- int (*set_adp_sense)(struct intel_mid_otg_xceiv *iotg,
- bool enabled);
-
-#ifdef CONFIG_PM
- /* suspend/resume USB host function */
- int (*suspend_host)(struct intel_mid_otg_xceiv *iotg,
- pm_message_t message);
- int (*resume_host)(struct intel_mid_otg_xceiv *iotg);
-
- int (*suspend_peripheral)(struct intel_mid_otg_xceiv *iotg,
- pm_message_t message);
- int (*resume_peripheral)(struct intel_mid_otg_xceiv *iotg);
-#endif
-
-};
-static inline
-struct intel_mid_otg_xceiv *otg_to_mid_xceiv(struct usb_phy *otg)
-{
- return container_of(otg, struct intel_mid_otg_xceiv, otg);
-}
-
-#define MID_OTG_NOTIFY_CONNECT 0x0001
-#define MID_OTG_NOTIFY_DISCONN 0x0002
-#define MID_OTG_NOTIFY_HSUSPEND 0x0003
-#define MID_OTG_NOTIFY_HRESUME 0x0004
-#define MID_OTG_NOTIFY_CSUSPEND 0x0005
-#define MID_OTG_NOTIFY_CRESUME 0x0006
-#define MID_OTG_NOTIFY_HOSTADD 0x0007
-#define MID_OTG_NOTIFY_HOSTREMOVE 0x0008
-#define MID_OTG_NOTIFY_CLIENTADD 0x0009
-#define MID_OTG_NOTIFY_CLIENTREMOVE 0x000a
-
-static inline int
-intel_mid_otg_register_notifier(struct intel_mid_otg_xceiv *iotg,
- struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&iotg->iotg_notifier, nb);
-}
-
-static inline void
-intel_mid_otg_unregister_notifier(struct intel_mid_otg_xceiv *iotg,
- struct notifier_block *nb)
-{
- atomic_notifier_chain_unregister(&iotg->iotg_notifier, nb);
-}
-
-#endif /* __INTEL_MID_OTG_H */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index 053c26841cc3..eb505250940a 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -99,8 +99,6 @@ struct musb_hdrc_platform_data {
/* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */
u8 mode;
- u8 has_mailbox:1;
-
/* for clk_get() */
const char *clock;
diff --git a/include/linux/usb/omap_control_usb.h b/include/linux/usb/omap_control_usb.h
index 27b5b8c931b0..596b01918813 100644
--- a/include/linux/usb/omap_control_usb.h
+++ b/include/linux/usb/omap_control_usb.h
@@ -19,20 +19,23 @@
#ifndef __OMAP_CONTROL_USB_H__
#define __OMAP_CONTROL_USB_H__
+enum omap_control_usb_type {
+ OMAP_CTRL_TYPE_OTGHS = 1, /* Mailbox OTGHS_CONTROL */
+ OMAP_CTRL_TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */
+ OMAP_CTRL_TYPE_PIPE3, /* PIPE3 PHY, DPLL & separate Rx/Tx power */
+ OMAP_CTRL_TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */
+};
+
struct omap_control_usb {
struct device *dev;
- u32 __iomem *dev_conf;
u32 __iomem *otghs_control;
- u32 __iomem *phy_power;
+ u32 __iomem *power;
+ u32 __iomem *power_aux;
struct clk *sys_clk;
- u32 type;
-};
-
-struct omap_control_usb_platform_data {
- u8 type;
+ enum omap_control_usb_type type;
};
enum omap_control_usb_mode {
@@ -42,10 +45,6 @@ enum omap_control_usb_mode {
USB_MODE_DISCONNECT,
};
-/* To differentiate ctrl module IP having either mailbox or USB3 PHY power */
-#define OMAP_CTRL_DEV_TYPE1 0x1
-#define OMAP_CTRL_DEV_TYPE2 0x2
-
#define OMAP_CTRL_DEV_PHY_PD BIT(0)
#define OMAP_CTRL_DEV_AVALID BIT(0)
@@ -63,26 +62,18 @@ enum omap_control_usb_mode {
#define OMAP_CTRL_USB3_PHY_TX_RX_POWERON 0x3
#define OMAP_CTRL_USB3_PHY_TX_RX_POWEROFF 0x0
+#define OMAP_CTRL_USB2_PHY_PD BIT(28)
+
#if IS_ENABLED(CONFIG_OMAP_CONTROL_USB)
-extern struct device *omap_get_control_dev(void);
extern void omap_control_usb_phy_power(struct device *dev, int on);
-extern void omap_control_usb3_phy_power(struct device *dev, bool on);
extern void omap_control_usb_set_mode(struct device *dev,
enum omap_control_usb_mode mode);
#else
-static inline struct device *omap_get_control_dev(void)
-{
- return ERR_PTR(-ENODEV);
-}
static inline void omap_control_usb_phy_power(struct device *dev, int on)
{
}
-static inline void omap_control_usb3_phy_power(struct device *dev, int on)
-{
-}
-
static inline void omap_control_usb_set_mode(struct device *dev,
enum omap_control_usb_mode mode)
{
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index d528b8045150..704a1ab8240c 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -320,6 +320,8 @@ extern struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor);
extern void usb_serial_put(struct usb_serial *serial);
extern int usb_serial_generic_open(struct tty_struct *tty,
struct usb_serial_port *port);
+extern int usb_serial_generic_write_start(struct usb_serial_port *port,
+ gfp_t mem_flags);
extern int usb_serial_generic_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count);
extern void usb_serial_generic_close(struct usb_serial_port *port);
diff --git a/include/linux/usb/usb_phy_gen_xceiv.h b/include/linux/usb/usb_phy_gen_xceiv.h
index f9a7e7bc925b..cc8d818a83be 100644
--- a/include/linux/usb/usb_phy_gen_xceiv.h
+++ b/include/linux/usb/usb_phy_gen_xceiv.h
@@ -9,10 +9,11 @@ struct usb_phy_gen_xceiv_platform_data {
/* if set fails with -EPROBE_DEFER if can't get regulator */
unsigned int needs_vcc:1;
- unsigned int needs_reset:1;
+ unsigned int needs_reset:1; /* deprecated */
+ int gpio_reset;
};
-#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
+#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
/* sometimes transceivers are accessed only through e.g. ULPI */
extern void usb_nop_xceiv_register(void);
extern void usb_nop_xceiv_unregister(void);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 9cb2fe8ca944..e303eef94dd5 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -42,6 +42,7 @@ struct usbnet {
struct usb_host_endpoint *status;
unsigned maxpacket;
struct timer_list delay;
+ const char *padding_pkt;
/* protocol/interface state */
struct net_device *net;
diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h
index 4ff744e2b678..c1257130769b 100644
--- a/include/linux/usb/wusb-wa.h
+++ b/include/linux/usb/wusb-wa.h
@@ -142,7 +142,7 @@ enum wa_notif_type {
struct wa_notif_hdr {
u8 bLength;
u8 bNotifyType; /* enum wa_notif_type */
-} __attribute__((packed));
+} __packed;
/**
* HWA DN Received notification [(WUSB] section 8.5.4.2)
@@ -158,7 +158,7 @@ struct hwa_notif_dn {
u8 bSourceDeviceAddr; /* from errata 2005/07 */
u8 bmAttributes;
struct wusb_dn_hdr dndata[];
-} __attribute__((packed));
+} __packed;
/* [WUSB] section 8.3.3 */
enum wa_xfer_type {
@@ -167,6 +167,8 @@ enum wa_xfer_type {
WA_XFER_TYPE_ISO = 0x82,
WA_XFER_RESULT = 0x83,
WA_XFER_ABORT = 0x84,
+ WA_XFER_ISO_PACKET_INFO = 0xA0,
+ WA_XFER_ISO_PACKET_STATUS = 0xA1,
};
/* [WUSB] section 8.3.3 */
@@ -177,28 +179,47 @@ struct wa_xfer_hdr {
__le32 dwTransferID; /* Host-assigned ID */
__le32 dwTransferLength; /* Length of data to xfer */
u8 bTransferSegment;
-} __attribute__((packed));
+} __packed;
struct wa_xfer_ctl {
struct wa_xfer_hdr hdr;
u8 bmAttribute;
__le16 wReserved;
struct usb_ctrlrequest baSetupData;
-} __attribute__((packed));
+} __packed;
struct wa_xfer_bi {
struct wa_xfer_hdr hdr;
u8 bReserved;
__le16 wReserved;
-} __attribute__((packed));
+} __packed;
+/* [WUSB] section 8.5.5 */
struct wa_xfer_hwaiso {
struct wa_xfer_hdr hdr;
u8 bReserved;
__le16 wPresentationTime;
__le32 dwNumOfPackets;
- /* FIXME: u8 pktdata[]? */
-} __attribute__((packed));
+} __packed;
+
+struct wa_xfer_packet_info_hwaiso {
+ __le16 wLength;
+ u8 bPacketType;
+ u8 bReserved;
+ __le16 PacketLength[0];
+} __packed;
+
+struct wa_xfer_packet_status_len_hwaiso {
+ __le16 PacketLength;
+ __le16 PacketStatus;
+} __packed;
+
+struct wa_xfer_packet_status_hwaiso {
+ __le16 wLength;
+ u8 bPacketType;
+ u8 bReserved;
+ struct wa_xfer_packet_status_len_hwaiso PacketStatus[0];
+} __packed;
/* [WUSB] section 8.3.3.5 */
struct wa_xfer_abort {
@@ -206,7 +227,7 @@ struct wa_xfer_abort {
u8 bRequestType;
__le16 wRPipe; /* RPipe index */
__le32 dwTransferID; /* Host-assigned ID */
-} __attribute__((packed));
+} __packed;
/**
* WA Transfer Complete notification ([WUSB] section 8.3.3.3)
@@ -216,7 +237,7 @@ struct wa_notif_xfer {
struct wa_notif_hdr hdr;
u8 bEndpoint;
u8 Reserved;
-} __attribute__((packed));
+} __packed;
/** Transfer result basic codes [WUSB] table 8-15 */
enum {
@@ -243,7 +264,7 @@ struct wa_xfer_result {
u8 bTransferSegment;
u8 bTransferStatus;
__le32 dwNumOfPackets;
-} __attribute__((packed));
+} __packed;
/**
* Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7).
@@ -258,16 +279,16 @@ struct wa_xfer_result {
struct usb_wa_descriptor {
u8 bLength;
u8 bDescriptorType;
- u16 bcdWAVersion;
+ __le16 bcdWAVersion;
u8 bNumPorts; /* don't use!! */
u8 bmAttributes; /* Reserved == 0 */
- u16 wNumRPipes;
- u16 wRPipeMaxBlock;
+ __le16 wNumRPipes;
+ __le16 wRPipeMaxBlock;
u8 bRPipeBlockSize;
u8 bPwrOn2PwrGood;
u8 bNumMMCIEs;
u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */
-} __attribute__((packed));
+} __packed;
/**
* HWA Device Information Buffer (WUSB1.0[T8.54])
@@ -277,6 +298,6 @@ struct hwa_dev_info {
u8 bDeviceAddress;
__le16 wPHYRates;
u8 bmDeviceAttribute;
-} __attribute__((packed));
+} __packed;
#endif /* #ifndef __LINUX_USB_WUSB_WA_H */
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 0c4d4ca370ec..eeb28329fa3c 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -271,6 +271,8 @@ static inline u8 wusb_key_index(int index, int type, int originator)
#define WUSB_KEY_INDEX_TYPE_GTK 2
#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
+/* bits 0-3 used for the key index. */
+#define WUSB_KEY_INDEX_MAX 15
/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
struct aes_ccm_nonce {
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index bf99cd01be20..630356866030 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -66,7 +66,9 @@
US_FLAG(INITIAL_READ10, 0x00100000) \
/* Initial READ(10) (and others) must be retried */ \
US_FLAG(WRITE_CACHE, 0x00200000) \
- /* Write Cache status is not available */
+ /* Write Cache status is not available */ \
+ US_FLAG(NEEDS_CAP16, 0x00400000)
+ /* cannot handle READ_CAPACITY_10 */
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 14105c26a836..4836ba3c1cd8 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -27,8 +27,12 @@ struct user_namespace {
kuid_t owner;
kgid_t group;
unsigned int proc_inum;
- bool may_mount_sysfs;
- bool may_mount_proc;
+
+ /* Register of per-UID persistent keyrings for this namespace */
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ struct key *persistent_keyring_register;
+ struct rw_semaphore persistent_keyring_register_sem;
+#endif
};
extern struct user_namespace init_user_ns;
@@ -85,6 +89,4 @@ static inline void put_user_ns(struct user_namespace *ns)
#endif
-void update_mnt_policy(struct user_namespace *userns);
-
#endif /* _LINUX_USER_H */
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index ac8d488e4372..24579a0312a0 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -90,4 +90,11 @@ extern void vfio_unregister_iommu_driver(
TYPE tmp; \
offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); }) \
+/*
+ * External user API
+ */
+extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
+extern void vfio_group_put_external_user(struct vfio_group *group);
+extern int vfio_external_user_iommu_id(struct vfio_group *group);
+
#endif /* VFIO_H */
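The three externs above form the VFIO external-user API. A minimal sketch of a hypothetical in-kernel consumer that resolves a group file descriptor to its IOMMU group id (names are illustrative, error handling trimmed):

	#include <linux/vfio.h>
	#include <linux/file.h>
	#include <linux/err.h>

	static int example_vfio_iommu_id(int group_fd)
	{
		struct file *filep = fget(group_fd);
		struct vfio_group *group;
		int iommu_id;

		if (!filep)
			return -EBADF;

		group = vfio_group_get_external_user(filep);
		fput(filep);
		if (IS_ERR(group))
			return PTR_ERR(group);

		iommu_id = vfio_external_user_iommu_id(group);
		/* A real consumer keeps the group reference while it stays
		 * attached; the sketch drops it immediately. */
		vfio_group_put_external_user(group);
		return iommu_id;
	}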
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 36d36cc89329..e4abb84199be 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -51,11 +51,11 @@ int virtqueue_add_sgs(struct virtqueue *vq,
void *data,
gfp_t gfp);
-void virtqueue_kick(struct virtqueue *vq);
+bool virtqueue_kick(struct virtqueue *vq);
bool virtqueue_kick_prepare(struct virtqueue *vq);
-void virtqueue_notify(struct virtqueue *vq);
+bool virtqueue_notify(struct virtqueue *vq);
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
@@ -73,6 +73,8 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
+bool virtqueue_is_broken(struct virtqueue *vq);
+
/**
* virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
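virtqueue_kick() and virtqueue_notify() now report whether the device could actually be notified, and virtqueue_is_broken() exposes the broken state. A minimal sketch of a hypothetical driver send path that checks both (helper name and token handling are illustrative):

	#include <linux/virtio.h>
	#include <linux/scatterlist.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	static int example_vq_send(struct virtqueue *vq, struct scatterlist *sg,
				   unsigned int num, void *token)
	{
		int err;

		if (virtqueue_is_broken(vq))
			return -EIO;

		err = virtqueue_add_outbuf(vq, sg, num, token, GFP_ATOMIC);
		if (err)
			return err;

		/* A false return now signals a broken or surprise-removed
		 * device rather than being silently ignored. */
		if (!virtqueue_kick(vq))
			return -EIO;

		return 0;
	}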
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 29b9104232b4..e8f8f71e843c 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -96,33 +96,6 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
return test_bit(fbit, vdev->features);
}
-/**
- * virtio_config_val - look for a feature and get a virtio config entry.
- * @vdev: the virtio device
- * @fbit: the feature bit
- * @offset: the type to search for.
- * @v: a pointer to the value to fill in.
- *
- * The return value is -ENOENT if the feature doesn't exist. Otherwise
- * the config value is copied into whatever is pointed to by v. */
-#define virtio_config_val(vdev, fbit, offset, v) \
- virtio_config_buf((vdev), (fbit), (offset), (v), sizeof(*v))
-
-#define virtio_config_val_len(vdev, fbit, offset, v, len) \
- virtio_config_buf((vdev), (fbit), (offset), (v), (len))
-
-static inline int virtio_config_buf(struct virtio_device *vdev,
- unsigned int fbit,
- unsigned int offset,
- void *buf, unsigned len)
-{
- if (!virtio_has_feature(vdev, fbit))
- return -ENOENT;
-
- vdev->config->get(vdev, offset, buf, len);
- return 0;
-}
-
static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
vq_callback_t *c, const char *n)
@@ -162,5 +135,139 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
return 0;
}
+/* Config space accessors. */
+#define virtio_cread(vdev, structname, member, ptr) \
+ do { \
+ /* Must match the member's type, and be integer */ \
+ if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
+ (*ptr) = 1; \
+ \
+ switch (sizeof(*ptr)) { \
+ case 1: \
+ *(ptr) = virtio_cread8(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ case 2: \
+ *(ptr) = virtio_cread16(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ case 4: \
+ *(ptr) = virtio_cread32(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ case 8: \
+ *(ptr) = virtio_cread64(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ default: \
+ BUG(); \
+ } \
+ } while (0)

+
+/* Config space accessors. */
+#define virtio_cwrite(vdev, structname, member, ptr) \
+ do { \
+ /* Must match the member's type, and be integer */ \
+ if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
+ BUG_ON((*ptr) == 1); \
+ \
+ switch (sizeof(*ptr)) { \
+ case 1: \
+ virtio_cwrite8(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ case 2: \
+ virtio_cwrite16(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ case 4: \
+ virtio_cwrite32(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ case 8: \
+ virtio_cwrite64(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ default: \
+ BUG(); \
+ } \
+ } while (0)
+
+static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
+{
+ u8 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
+static inline void virtio_cread_bytes(struct virtio_device *vdev,
+ unsigned int offset,
+ void *buf, size_t len)
+{
+ vdev->config->get(vdev, offset, buf, len);
+}
+
+static inline void virtio_cwrite8(struct virtio_device *vdev,
+ unsigned int offset, u8 val)
+{
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u16 virtio_cread16(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ u16 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
+static inline void virtio_cwrite16(struct virtio_device *vdev,
+ unsigned int offset, u16 val)
+{
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u32 virtio_cread32(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ u32 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
+static inline void virtio_cwrite32(struct virtio_device *vdev,
+ unsigned int offset, u32 val)
+{
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u64 virtio_cread64(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ u64 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
+static inline void virtio_cwrite64(struct virtio_device *vdev,
+ unsigned int offset, u64 val)
+{
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+/* Conditional config space accessors. */
+#define virtio_cread_feature(vdev, fbit, structname, member, ptr) \
+ ({ \
+ int _r = 0; \
+ if (!virtio_has_feature(vdev, fbit)) \
+ _r = -ENOENT; \
+ else \
+ virtio_cread((vdev), structname, member, ptr); \
+ _r; \
+ })
#endif /* _LINUX_VIRTIO_CONFIG_H */
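The virtio_cread*/virtio_cwrite* helpers above give typed, sized access to device config space, and virtio_cread_feature() folds in the feature check that virtio_config_val() used to perform. A minimal sketch of a hypothetical probe path, using the real virtio-blk config layout purely as an illustration:

	#include <linux/kernel.h>
	#include <linux/virtio_config.h>
	#include <linux/virtio_blk.h>

	static void example_read_blk_config(struct virtio_device *vdev)
	{
		u64 capacity;
		u32 blk_size;

		/* Sized read selected by the member's type (8 bytes here). */
		virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

		/* Returns -ENOENT when the feature was not negotiated. */
		if (virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
					 struct virtio_blk_config, blk_size,
					 &blk_size))
			blk_size = 512;		/* illustrative fallback */

		pr_info("capacity=%llu sectors, blk_size=%u\n",
			(unsigned long long)capacity, blk_size);
	}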
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index b300787af8e0..67e06fe18c03 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -71,7 +71,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
struct virtio_device *vdev,
bool weak_barriers,
void *pages,
- void (*notify)(struct virtqueue *vq),
+ bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name);
void vring_del_virtqueue(struct virtqueue *vq);
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index bd6cf61142be..c557c6d096de 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -39,6 +39,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_NUMA_BALANCING
NUMA_PTE_UPDATES,
+ NUMA_HUGE_PTE_UPDATES,
NUMA_HINT_FAULTS,
NUMA_HINT_FAULTS_LOCAL,
NUMA_PAGE_MIGRATE,
@@ -70,6 +71,12 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
#endif
+#ifdef CONFIG_SMP
+ NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
+ NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
+#endif
+ NR_TLB_LOCAL_FLUSH_ALL,
+ NR_TLB_LOCAL_FLUSH_ONE,
NR_VM_EVENT_ITEMS
};
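The new TLB counters are ordinary vm_event_item entries, so arch flush paths account them with count_vm_event(). A minimal sketch, with the actual flush call left as a hypothetical placeholder:

	#include <linux/vmstat.h>

	static void example_local_flush_tlb_all(void)
	{
		count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
		/* arch_flush_tlb_all();   hypothetical arch-specific hook */
	}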
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index c586679b6fef..e4b948080d20 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -143,7 +143,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
}
extern unsigned long global_reclaimable_pages(void);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
#ifdef CONFIG_NUMA
/*
@@ -198,7 +197,7 @@ extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
-void refresh_cpu_vm_stats(int);
+void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
@@ -255,6 +254,7 @@ static inline void __dec_zone_page_state(struct page *page,
static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
+static inline void cpu_vm_stats_fold(int cpu) { }
static inline void drain_zonestat(struct zone *zone,
struct per_cpu_pageset *pset) { }
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a67fc1635592..eaa00b10abaa 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1,7 +1,8 @@
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
-
-
+/*
+ * Linux wait queue related types and methods
+ */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
@@ -13,27 +14,27 @@ typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, v
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
struct __wait_queue {
- unsigned int flags;
+ unsigned int flags;
#define WQ_FLAG_EXCLUSIVE 0x01
- void *private;
- wait_queue_func_t func;
- struct list_head task_list;
+ void *private;
+ wait_queue_func_t func;
+ struct list_head task_list;
};
struct wait_bit_key {
- void *flags;
- int bit_nr;
-#define WAIT_ATOMIC_T_BIT_NR -1
+ void *flags;
+ int bit_nr;
+#define WAIT_ATOMIC_T_BIT_NR -1
};
struct wait_bit_queue {
- struct wait_bit_key key;
- wait_queue_t wait;
+ struct wait_bit_key key;
+ wait_queue_t wait;
};
struct __wait_queue_head {
- spinlock_t lock;
- struct list_head task_list;
+ spinlock_t lock;
+ struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
@@ -84,17 +85,17 @@ extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
- q->flags = 0;
- q->private = p;
- q->func = default_wake_function;
+ q->flags = 0;
+ q->private = p;
+ q->func = default_wake_function;
}
-static inline void init_waitqueue_func_entry(wait_queue_t *q,
- wait_queue_func_t func)
+static inline void
+init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
- q->flags = 0;
- q->private = NULL;
- q->func = func;
+ q->flags = 0;
+ q->private = NULL;
+ q->func = func;
}
static inline int waitqueue_active(wait_queue_head_t *q)
@@ -114,8 +115,8 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
/*
* Used for wake-one threads:
*/
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
- wait_queue_t *wait)
+static inline void
+__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
wait->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue(q, wait);
@@ -127,23 +128,22 @@ static inline void __add_wait_queue_tail(wait_queue_head_t *head,
list_add_tail(&new->task_list, &head->task_list);
}
-static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
- wait_queue_t *wait)
+static inline void
+__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
wait->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue_tail(q, wait);
}
-static inline void __remove_wait_queue(wait_queue_head_t *head,
- wait_queue_t *old)
+static inline void
+__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
list_del(&old->task_list);
}
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
- void *key);
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
@@ -170,27 +170,64 @@ wait_queue_head_t *bit_waitqueue(void *, int);
/*
* Wakeup macros to be used to report events to the targets.
*/
-#define wake_up_poll(x, m) \
+#define wake_up_poll(x, m) \
__wake_up(x, TASK_NORMAL, 1, (void *) (m))
-#define wake_up_locked_poll(x, m) \
+#define wake_up_locked_poll(x, m) \
__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
-#define wake_up_interruptible_poll(x, m) \
+#define wake_up_interruptible_poll(x, m) \
__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
-#define __wait_event(wq, condition) \
-do { \
- DEFINE_WAIT(__wait); \
+#define ___wait_cond_timeout(condition) \
+({ \
+ bool __cond = (condition); \
+ if (__cond && !__ret) \
+ __ret = 1; \
+ __cond || !__ret; \
+})
+
+#define ___wait_is_interruptible(state) \
+ (!__builtin_constant_p(state) || \
+ state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)
+
+#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
+({ \
+ __label__ __out; \
+ wait_queue_t __wait; \
+ long __ret = ret; \
+ \
+ INIT_LIST_HEAD(&__wait.task_list); \
+ if (exclusive) \
+ __wait.flags = WQ_FLAG_EXCLUSIVE; \
+ else \
+ __wait.flags = 0; \
\
for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
+ long __int = prepare_to_wait_event(&wq, &__wait, state);\
+ \
if (condition) \
break; \
- schedule(); \
+ \
+ if (___wait_is_interruptible(state) && __int) { \
+ __ret = __int; \
+ if (exclusive) { \
+ abort_exclusive_wait(&wq, &__wait, \
+ state, NULL); \
+ goto __out; \
+ } \
+ break; \
+ } \
+ \
+ cmd; \
} \
finish_wait(&wq, &__wait); \
-} while (0)
+__out: __ret; \
+})
+
+#define __wait_event(wq, condition) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ schedule())
/**
* wait_event - sleep until a condition gets true
@@ -204,29 +241,17 @@ do { \
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*/
-#define wait_event(wq, condition) \
+#define wait_event(wq, condition) \
do { \
- if (condition) \
+ if (condition) \
break; \
__wait_event(wq, condition); \
} while (0)
-#define __wait_event_timeout(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- } \
- if (!ret && (condition)) \
- ret = 1; \
- finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_timeout(wq, condition, timeout) \
+ ___wait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_UNINTERRUPTIBLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
/**
* wait_event_timeout - sleep until a condition gets true or a timeout elapses
@@ -248,29 +273,40 @@ do { \
#define wait_event_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
- if (!(condition)) \
- __wait_event_timeout(wq, condition, __ret); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_timeout(wq, condition, timeout); \
__ret; \
})
-#define __wait_event_interruptible(wq, condition, ret) \
+#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ cmd1; schedule(); cmd2)
+
+/**
+ * wait_event_cmd - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @cmd1: the command to be executed before sleep
+ * @cmd2: the command to be executed after sleep
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (!signal_pending(current)) { \
- schedule(); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
+ if (condition) \
break; \
- } \
- finish_wait(&wq, &__wait); \
+ __wait_event_cmd(wq, condition, cmd1, cmd2); \
} while (0)
+#define __wait_event_interruptible(wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ schedule())
+
/**
* wait_event_interruptible - sleep until a condition gets true
* @wq: the waitqueue to wait on
@@ -290,31 +326,14 @@ do { \
({ \
int __ret = 0; \
if (!(condition)) \
- __wait_event_interruptible(wq, condition, __ret); \
+ __ret = __wait_event_interruptible(wq, condition); \
__ret; \
})
-#define __wait_event_interruptible_timeout(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (!signal_pending(current)) { \
- ret = schedule_timeout(ret); \
- if (!ret) \
- break; \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- if (!ret && (condition)) \
- ret = 1; \
- finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_interruptible_timeout(wq, condition, timeout) \
+ ___wait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_INTERRUPTIBLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
/**
* wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
@@ -337,15 +356,15 @@ do { \
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
- if (!(condition)) \
- __wait_event_interruptible_timeout(wq, condition, __ret); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_interruptible_timeout(wq, \
+ condition, timeout); \
__ret; \
})
#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
int __ret = 0; \
- DEFINE_WAIT(__wait); \
struct hrtimer_sleeper __t; \
\
hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
@@ -356,25 +375,15 @@ do { \
current->timer_slack_ns, \
HRTIMER_MODE_REL); \
\
- for (;;) { \
- prepare_to_wait(&wq, &__wait, state); \
- if (condition) \
- break; \
- if (state == TASK_INTERRUPTIBLE && \
- signal_pending(current)) { \
- __ret = -ERESTARTSYS; \
- break; \
- } \
+ __ret = ___wait_event(wq, condition, state, 0, 0, \
if (!__t.task) { \
__ret = -ETIME; \
break; \
} \
- schedule(); \
- } \
+ schedule()); \
\
hrtimer_cancel(&__t.timer); \
destroy_hrtimer_on_stack(&__t.timer); \
- finish_wait(&wq, &__wait); \
__ret; \
})
@@ -428,33 +437,15 @@ do { \
__ret; \
})
-#define __wait_event_interruptible_exclusive(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait_exclusive(&wq, &__wait, \
- TASK_INTERRUPTIBLE); \
- if (condition) { \
- finish_wait(&wq, &__wait); \
- break; \
- } \
- if (!signal_pending(current)) { \
- schedule(); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- abort_exclusive_wait(&wq, &__wait, \
- TASK_INTERRUPTIBLE, NULL); \
- break; \
- } \
-} while (0)
+#define __wait_event_interruptible_exclusive(wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
+ schedule())
#define wait_event_interruptible_exclusive(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
- __wait_event_interruptible_exclusive(wq, condition, __ret);\
+ __ret = __wait_event_interruptible_exclusive(wq, condition);\
__ret; \
})
@@ -606,24 +597,8 @@ do { \
? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
-
-#define __wait_event_killable(wq, condition, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
- if (condition) \
- break; \
- if (!fatal_signal_pending(current)) { \
- schedule(); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_killable(wq, condition) \
+ ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
/**
* wait_event_killable - sleep until a condition gets true
@@ -644,26 +619,17 @@ do { \
({ \
int __ret = 0; \
if (!(condition)) \
- __wait_event_killable(wq, condition, __ret); \
+ __ret = __wait_event_killable(wq, condition); \
__ret; \
})
#define __wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- spin_unlock_irq(&lock); \
- cmd; \
- schedule(); \
- spin_lock_irq(&lock); \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ spin_unlock_irq(&lock); \
+ cmd; \
+ schedule(); \
+ spin_lock_irq(&lock))
/**
* wait_event_lock_irq_cmd - sleep until a condition gets true. The
@@ -723,26 +689,12 @@ do { \
} while (0)
-#define __wait_event_interruptible_lock_irq(wq, condition, \
- lock, ret, cmd) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- spin_unlock_irq(&lock); \
- cmd; \
- schedule(); \
- spin_lock_irq(&lock); \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ spin_unlock_irq(&lock); \
+ cmd; \
+ schedule(); \
+ spin_lock_irq(&lock))
/**
* wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
@@ -772,10 +724,9 @@ do { \
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
int __ret = 0; \
- \
if (!(condition)) \
- __wait_event_interruptible_lock_irq(wq, condition, \
- lock, __ret, cmd); \
+ __ret = __wait_event_interruptible_lock_irq(wq, \
+ condition, lock, cmd); \
__ret; \
})
@@ -804,39 +755,24 @@ do { \
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
int __ret = 0; \
- \
if (!(condition)) \
- __wait_event_interruptible_lock_irq(wq, condition, \
- lock, __ret, ); \
+ __ret = __wait_event_interruptible_lock_irq(wq, \
+ condition, lock,); \
__ret; \
})
#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
- lock, ret) \
-do { \
- DEFINE_WAIT(__wait); \
- \
- for (;;) { \
- prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (signal_pending(current)) { \
- ret = -ERESTARTSYS; \
- break; \
- } \
- spin_unlock_irq(&lock); \
- ret = schedule_timeout(ret); \
- spin_lock_irq(&lock); \
- if (!ret) \
- break; \
- } \
- finish_wait(&wq, &__wait); \
-} while (0)
+ lock, timeout) \
+ ___wait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_INTERRUPTIBLE, 0, timeout, \
+ spin_unlock_irq(&lock); \
+ __ret = schedule_timeout(__ret); \
+ spin_lock_irq(&lock));
/**
- * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
- * The condition is checked under the lock. This is expected
- * to be called with the lock taken.
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
+ * true or a timeout elapses. The condition is checked under
+ * the lock. This is expected to be called with the lock taken.
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @lock: a locked spinlock_t, which will be released before schedule()
@@ -860,11 +796,10 @@ do { \
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
timeout) \
({ \
- int __ret = timeout; \
- \
- if (!(condition)) \
- __wait_event_interruptible_lock_irq_timeout( \
- wq, condition, lock, __ret); \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_interruptible_lock_irq_timeout( \
+ wq, condition, lock, timeout); \
__ret; \
})
@@ -875,20 +810,18 @@ do { \
* We plan to remove these interfaces.
*/
extern void sleep_on(wait_queue_head_t *q);
-extern long sleep_on_timeout(wait_queue_head_t *q,
- signed long timeout);
+extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
-extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
- signed long timeout);
+extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
/*
* Waitqueues which are removed from the waitqueue_head at wakeup time
*/
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
- unsigned int mode, void *key);
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
@@ -934,8 +867,8 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
* One uses wait_on_bit() where one is waiting for the bit to clear,
* but has no intention of setting it.
*/
-static inline int wait_on_bit(void *word, int bit,
- int (*action)(void *), unsigned mode)
+static inline int
+wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
{
if (!test_bit(bit, word))
return 0;
@@ -958,8 +891,8 @@ static inline int wait_on_bit(void *word, int bit,
* One uses wait_on_bit_lock() where one is waiting for the bit to
* clear with the intention of setting it, and when done, clearing it.
*/
-static inline int wait_on_bit_lock(void *word, int bit,
- int (*action)(void *), unsigned mode)
+static inline int
+wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
{
if (!test_and_set_bit(bit, word))
return 0;
@@ -983,5 +916,5 @@ int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
return 0;
return out_of_line_wait_on_atomic_t(val, action, mode);
}
-
-#endif
+
+#endif /* _LINUX_WAIT_H */
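All of the wait_event_*() variants above are now thin wrappers around ___wait_event(), but their return conventions are unchanged. A minimal sketch of a hypothetical caller of the timeout variant (names illustrative):

	#include <linux/wait.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	static int example_wait_for_data(wait_queue_head_t *wq, bool *data_ready)
	{
		long ret;

		/* 0 on timeout, remaining jiffies on success,
		 * -ERESTARTSYS if interrupted by a signal. */
		ret = wait_event_interruptible_timeout(*wq, *data_ready, HZ);
		if (ret < 0)
			return ret;
		if (!ret)
			return -ETIMEDOUT;
		return 0;
	}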
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 4e198ca1f685..fc0e4320aa6d 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -97,9 +97,7 @@ void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
enum wb_reason reason);
-void sync_inodes_sb(struct super_block *);
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
- enum wb_reason reason);
+void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index fdbafc6841cf..91b0a68d38dc 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -31,7 +31,7 @@ struct xattr_handler {
};
struct xattr {
- char *name;
+ const char *name;
void *value;
size_t value_len;
};
diff --git a/include/linux/yam.h b/include/linux/yam.h
index 7fe28228b274..512cdc2fb80f 100644
--- a/include/linux/yam.h
+++ b/include/linux/yam.h
@@ -77,6 +77,6 @@ struct yamdrv_ioctl_cfg {
struct yamdrv_ioctl_mcs {
int cmd;
- int bitrate;
+ unsigned int bitrate;
unsigned char bits[YAM_FPGA_SIZE];
};