first commit

nqthai199@gmail.com 2022-09-15 09:26:49 +07:00
commit df3dd9a705
168 changed files with 67159 additions and 0 deletions

129
Makefile Executable file

@@ -0,0 +1,129 @@
################################################################################
# Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#################################################################################
# Enable this flag to use the optimized dsexample plugin;
# it can also be exported from the command line.
WITH_OPENCV?=1
USE_OPTIMIZED_DSEXAMPLE?=0
CUDA_VER?=11.2
ifeq ($(CUDA_VER),)
$(error "CUDA_VER is not set")
endif
TARGET_DEVICE = $(shell gcc -dumpmachine | cut -f1 -d -)
CXX:= g++
NVCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc
ifeq ($(USE_OPTIMIZED_DSEXAMPLE),1)
SRCS:= gstdsexample_optimized.cpp
else
SRCS:= gstdsexample.cpp
SRCS+= ./src/CharacterDetection.cpp
SRCS+= ./src/CropLicensePlate.cpp
SRCS+= ./src/GetPlateColor.cpp
SRCS+= ./src/LicensePlateRecognition.cpp
SRCS+= ./src/LPRprocessor.cpp
SRCS+= ./src/config.cpp
SRCS+= ./src/CropLP.cpp
SRCS+= ./src/YOLOv5/preprocess.cu
SRCS+= ./src/YOLOv5/yololayer.cu
SRCS+= ./src/YOLOv5/DetectorCOCO.cpp
endif
INCS:= $(wildcard *.h)
INCS+= $(wildcard ./include/*.h)
INCS+= $(wildcard ./include/*.hpp)
INCS+= $(wildcard ./include/YOLOv5/*.h)
INCS+= $(wildcard ./include/YOLOv5/*.hpp)
LIB:=libnvdsgst_dsexample.so
NVDS_VERSION:=6.0
DEP:=dsexample_lib/libdsexample.a
DEP_FILES:=$(wildcard dsexample_lib/dsexample_lib.* )
DEP_FILES:=$(filter-out $(DEP),$(DEP_FILES))
CFLAGS+= -fPIC -DDS_VERSION=\"6.0\" \
-I /usr/local/cuda-$(CUDA_VER)/include \
-I ../../includes \
-I ./lib_ncnn/ncnn \
-I ./include \
-I ./include/YOLOv5
CFLAGS_NVCC:=$(shell pkg-config --cflags --libs opencv4 )
CFLAGS_NVCC+=-I ./include -I ./include/YOLOv5
GST_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/lib/gst-plugins/
LIB_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/lib/
LIBS := -shared -Wl,-no-undefined \
-L dsexample_lib -ldsexample \
-L/usr/local/cuda-$(CUDA_VER)/lib64/ -lcudart -ldl \
-lnppc -lnppig -lnpps -lnppicc -lnppidei \
-L$(LIB_INSTALL_DIR) -lnvdsgst_helper -lnvdsgst_meta -lnvds_meta -lnvbufsurface -lnvbufsurftransform \
-Wl,-rpath,$(LIB_INSTALL_DIR) \
-L lib_ncnn -lncnn -fopenmp \
-lnvinfer -lpthread
# -lnvinfer_plugin -lnvparsers
OBJS:= $(SRCS:.cpp=.o)
OBJS:= $(OBJS:.cu=.o)
PKGS:= gstreamer-1.0 gstreamer-base-1.0 gstreamer-video-1.0
ifeq ($(WITH_OPENCV),1)
CFLAGS+= -DWITH_OPENCV
ifeq ($(TARGET_DEVICE),aarch64)
PKGS+= opencv4
else
PKGS+= opencv
endif
endif
CFLAGS+=$(shell pkg-config --cflags $(PKGS))
LIBS+=$(shell pkg-config --libs $(PKGS))
all: $(LIB)
%.o: %.cpp $(INCS) Makefile
@echo $(CFLAGS)
$(CXX) -c -o $@ $(CFLAGS) $<
%.o: %.cu $(INCS) Makefile
$(NVCC) -c -o $@ $(CFLAGS_NVCC) --compiler-options '-fPIC' $<
$(LIB): $(OBJS) $(DEP) Makefile
@echo $(CFLAGS)
$(CXX) -o $@ $(OBJS) $(LIBS)
$(DEP): $(DEP_FILES)
$(MAKE) -C dsexample_lib/
install: $(LIB)
cp -rv $(LIB) $(GST_INSTALL_DIR)
clean:
rm -rf $(OBJS) $(LIB)

51
README Executable file

@@ -0,0 +1,51 @@
################################################################################
# Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA Corporation and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA Corporation is strictly prohibited.
#
################################################################################
Refer to the DeepStream SDK documentation for a description of the plugin.
--------------------------------------------------------------------------------
Pre-requisites:
- GStreamer-1.0 Development package
- GStreamer-1.0 Base Plugins Development package
- OpenCV Development package
Install using:
sudo apt-get install libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
libopencv-dev
--------------------------------------------------------------------------------
Compiling and installing the plugin:
Export the appropriate CUDA version using CUDA_VER, either from the command line or in the Makefile.
Run "make" and then "sudo make install".
NOTE:
1. To compile the sources, run make with "sudo" or root permission.
2. This plugin contains an additional optimized sample which supports batch processing
   of buffers. Refer to the Makefile for using the optimized sample.
3. OpenCV has been deprecated by default, so blur-objects will not work.
   To enable OpenCV in dsexample, set `WITH_OPENCV=1` in the plugin Makefile
   (/opt/nvidia/deepstream/deepstream/sources/gst-plugins/gst-dsexample/Makefile)
   and follow the compilation and installation instructions in this README.
--------------------------------------------------------------------------------
Corresponding config file changes (add the following section). The GPU ID might need
modification based on the GPU configuration:
[ds-example]
enable=1
processing-width=640
processing-height=480
full-frame=0
#batch-size for batch supported optimized plugin
batch-size=1
unique-id=15
gpu-id=0
blur-objects=0
# Supported memory types for blur-objects are 1 and 3
nvbuf-memory-type=3
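The same properties can also be set programmatically when building a pipeline in application code. Below is a minimal C++ sketch, assuming the plugin is installed and registered under the factory name "dsexample" as in the stock DeepStream sample; the property names mirror the config keys above, and the surrounding pipeline elements are omitted.

// Illustrative only: configures a dsexample instance with the values from the
// [ds-example] section above. Assumes the element factory name "dsexample".
#include <gst/gst.h>

int main (int argc, char *argv[])
{
  gst_init (&argc, &argv);

  GstElement *dsexample = gst_element_factory_make ("dsexample", "ds-example");
  if (!dsexample)
    return -1;   // plugin not found in the GStreamer plugin path

  g_object_set (G_OBJECT (dsexample),
      "processing-width", 640,
      "processing-height", 480,
      "full-frame", FALSE,
      "unique-id", 15,
      "gpu-id", 0,
      "blur-objects", FALSE,
      NULL);

  /* ... add to a bin and link between the streammux/infer elements and the
   * sink, exactly as with any other DeepStream plugin ... */
  gst_object_unref (dsexample);
  return 0;
}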


@@ -0,0 +1,282 @@
7767517
280 310
Input images 0 1 images
YoloV5Focus focus 1 1 images 683
Convolution Conv_41 1 1 683 1177 0=16 1=3 4=1 5=1 6=1728
Swish Mul_43 1 1 1177 687
ConvolutionDepthWise Conv_44 1 1 687 1180 0=16 1=3 3=2 4=1 5=1 6=144 7=16
Swish Mul_46 1 1 1180 691
Convolution Conv_47 1 1 691 1183 0=32 1=1 5=1 6=512
Swish Mul_49 1 1 1183 695
Split splitncnn_0 1 2 695 695_splitncnn_0 695_splitncnn_1
Convolution Conv_50 1 1 695_splitncnn_1 1186 0=16 1=1 5=1 6=512
Swish Mul_52 1 1 1186 699
Split splitncnn_1 1 2 699 699_splitncnn_0 699_splitncnn_1
Convolution Conv_53 1 1 695_splitncnn_0 1189 0=16 1=1 5=1 6=512
Swish Mul_55 1 1 1189 703
Convolution Conv_56 1 1 699_splitncnn_1 1192 0=16 1=1 5=1 6=256
Swish Mul_58 1 1 1192 707
ConvolutionDepthWise Conv_59 1 1 707 1195 0=16 1=3 4=1 5=1 6=144 7=16
Swish Mul_61 1 1 1195 711
Convolution Conv_62 1 1 711 1198 0=16 1=1 5=1 6=256
Swish Mul_64 1 1 1198 715
BinaryOp Add_65 2 1 715 699_splitncnn_0 716
Concat Concat_66 2 1 716 703 717
Convolution Conv_67 1 1 717 1201 0=32 1=1 5=1 6=1024
Swish Mul_69 1 1 1201 721
ConvolutionDepthWise Conv_70 1 1 721 1204 0=32 1=3 3=2 4=1 5=1 6=288 7=32
Swish Mul_72 1 1 1204 725
Convolution Conv_73 1 1 725 1207 0=64 1=1 5=1 6=2048
Swish Mul_75 1 1 1207 729
Split splitncnn_2 1 2 729 729_splitncnn_0 729_splitncnn_1
Convolution Conv_76 1 1 729_splitncnn_1 1210 0=32 1=1 5=1 6=2048
Swish Mul_78 1 1 1210 733
Split splitncnn_3 1 2 733 733_splitncnn_0 733_splitncnn_1
Convolution Conv_79 1 1 729_splitncnn_0 1213 0=32 1=1 5=1 6=2048
Swish Mul_81 1 1 1213 737
Convolution Conv_82 1 1 733_splitncnn_1 1216 0=32 1=1 5=1 6=1024
Swish Mul_84 1 1 1216 741
ConvolutionDepthWise Conv_85 1 1 741 1219 0=32 1=3 4=1 5=1 6=288 7=32
Swish Mul_87 1 1 1219 745
Convolution Conv_88 1 1 745 1222 0=32 1=1 5=1 6=1024
Swish Mul_90 1 1 1222 749
BinaryOp Add_91 2 1 749 733_splitncnn_0 750
Split splitncnn_4 1 2 750 750_splitncnn_0 750_splitncnn_1
Convolution Conv_92 1 1 750_splitncnn_1 1225 0=32 1=1 5=1 6=1024
Swish Mul_94 1 1 1225 754
ConvolutionDepthWise Conv_95 1 1 754 1228 0=32 1=3 4=1 5=1 6=288 7=32
Swish Mul_97 1 1 1228 758
Convolution Conv_98 1 1 758 1231 0=32 1=1 5=1 6=1024
Swish Mul_100 1 1 1231 762
BinaryOp Add_101 2 1 762 750_splitncnn_0 763
Split splitncnn_5 1 2 763 763_splitncnn_0 763_splitncnn_1
Convolution Conv_102 1 1 763_splitncnn_1 1234 0=32 1=1 5=1 6=1024
Swish Mul_104 1 1 1234 767
ConvolutionDepthWise Conv_105 1 1 767 1237 0=32 1=3 4=1 5=1 6=288 7=32
Swish Mul_107 1 1 1237 771
Convolution Conv_108 1 1 771 1240 0=32 1=1 5=1 6=1024
Swish Mul_110 1 1 1240 775
BinaryOp Add_111 2 1 775 763_splitncnn_0 776
Concat Concat_112 2 1 776 737 777
Convolution Conv_113 1 1 777 1243 0=64 1=1 5=1 6=4096
Swish Mul_115 1 1 1243 781
Split splitncnn_6 1 2 781 781_splitncnn_0 781_splitncnn_1
ConvolutionDepthWise Conv_116 1 1 781_splitncnn_1 1246 0=64 1=3 3=2 4=1 5=1 6=576 7=64
Swish Mul_118 1 1 1246 785
Convolution Conv_119 1 1 785 1249 0=128 1=1 5=1 6=8192
Swish Mul_121 1 1 1249 789
Split splitncnn_7 1 2 789 789_splitncnn_0 789_splitncnn_1
Convolution Conv_122 1 1 789_splitncnn_1 1252 0=64 1=1 5=1 6=8192
Swish Mul_124 1 1 1252 793
Split splitncnn_8 1 2 793 793_splitncnn_0 793_splitncnn_1
Convolution Conv_125 1 1 789_splitncnn_0 1255 0=64 1=1 5=1 6=8192
Swish Mul_127 1 1 1255 797
Convolution Conv_128 1 1 793_splitncnn_1 1258 0=64 1=1 5=1 6=4096
Swish Mul_130 1 1 1258 801
ConvolutionDepthWise Conv_131 1 1 801 1261 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_133 1 1 1261 805
Convolution Conv_134 1 1 805 1264 0=64 1=1 5=1 6=4096
Swish Mul_136 1 1 1264 809
BinaryOp Add_137 2 1 809 793_splitncnn_0 810
Split splitncnn_9 1 2 810 810_splitncnn_0 810_splitncnn_1
Convolution Conv_138 1 1 810_splitncnn_1 1267 0=64 1=1 5=1 6=4096
Swish Mul_140 1 1 1267 814
ConvolutionDepthWise Conv_141 1 1 814 1270 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_143 1 1 1270 818
Convolution Conv_144 1 1 818 1273 0=64 1=1 5=1 6=4096
Swish Mul_146 1 1 1273 822
BinaryOp Add_147 2 1 822 810_splitncnn_0 823
Split splitncnn_10 1 2 823 823_splitncnn_0 823_splitncnn_1
Convolution Conv_148 1 1 823_splitncnn_1 1276 0=64 1=1 5=1 6=4096
Swish Mul_150 1 1 1276 827
ConvolutionDepthWise Conv_151 1 1 827 1279 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_153 1 1 1279 831
Convolution Conv_154 1 1 831 1282 0=64 1=1 5=1 6=4096
Swish Mul_156 1 1 1282 835
BinaryOp Add_157 2 1 835 823_splitncnn_0 836
Concat Concat_158 2 1 836 797 837
Convolution Conv_159 1 1 837 1285 0=128 1=1 5=1 6=16384
Swish Mul_161 1 1 1285 841
Split splitncnn_11 1 2 841 841_splitncnn_0 841_splitncnn_1
ConvolutionDepthWise Conv_162 1 1 841_splitncnn_1 1288 0=128 1=3 3=2 4=1 5=1 6=1152 7=128
Swish Mul_164 1 1 1288 845
Convolution Conv_165 1 1 845 1291 0=256 1=1 5=1 6=32768
Swish Mul_167 1 1 1291 849
Convolution Conv_168 1 1 849 1294 0=128 1=1 5=1 6=32768
Swish Mul_170 1 1 1294 853
Split splitncnn_12 1 4 853 853_splitncnn_0 853_splitncnn_1 853_splitncnn_2 853_splitncnn_3
Pooling MaxPool_171 1 1 853_splitncnn_3 854 1=5 3=2 5=1
Pooling MaxPool_172 1 1 853_splitncnn_2 855 1=9 3=4 5=1
Pooling MaxPool_173 1 1 853_splitncnn_1 856 1=13 3=6 5=1
Concat Concat_174 4 1 853_splitncnn_0 854 855 856 857
Convolution Conv_175 1 1 857 1297 0=256 1=1 5=1 6=131072
Swish Mul_177 1 1 1297 861
Split splitncnn_13 1 2 861 861_splitncnn_0 861_splitncnn_1
Convolution Conv_178 1 1 861_splitncnn_1 1300 0=128 1=1 5=1 6=32768
Swish Mul_180 1 1 1300 865
Convolution Conv_181 1 1 861_splitncnn_0 1303 0=128 1=1 5=1 6=32768
Swish Mul_183 1 1 1303 869
Convolution Conv_184 1 1 865 1306 0=128 1=1 5=1 6=16384
Swish Mul_186 1 1 1306 873
ConvolutionDepthWise Conv_187 1 1 873 1309 0=128 1=3 4=1 5=1 6=1152 7=128
Swish Mul_189 1 1 1309 877
Convolution Conv_190 1 1 877 1312 0=128 1=1 5=1 6=16384
Swish Mul_192 1 1 1312 881
Concat Concat_193 2 1 881 869 882
Convolution Conv_194 1 1 882 1315 0=256 1=1 5=1 6=65536
Swish Mul_196 1 1 1315 886
Convolution Conv_197 1 1 886 1318 0=128 1=1 5=1 6=32768
Swish Mul_199 1 1 1318 890
Split splitncnn_14 1 2 890 890_splitncnn_0 890_splitncnn_1
Interp Resize_201 1 1 890_splitncnn_1 895 0=1 1=2.000000e+00 2=2.000000e+00
Concat Concat_202 2 1 895 841_splitncnn_0 896
Split splitncnn_15 1 2 896 896_splitncnn_0 896_splitncnn_1
Convolution Conv_203 1 1 896_splitncnn_1 1321 0=64 1=1 5=1 6=16384
Swish Mul_205 1 1 1321 900
Convolution Conv_206 1 1 896_splitncnn_0 1324 0=64 1=1 5=1 6=16384
Swish Mul_208 1 1 1324 904
Convolution Conv_209 1 1 900 1327 0=64 1=1 5=1 6=4096
Swish Mul_211 1 1 1327 908
ConvolutionDepthWise Conv_212 1 1 908 1330 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_214 1 1 1330 912
Convolution Conv_215 1 1 912 1333 0=64 1=1 5=1 6=4096
Swish Mul_217 1 1 1333 916
Concat Concat_218 2 1 916 904 917
Convolution Conv_219 1 1 917 1336 0=128 1=1 5=1 6=16384
Swish Mul_221 1 1 1336 921
Convolution Conv_222 1 1 921 1339 0=64 1=1 5=1 6=8192
Swish Mul_224 1 1 1339 925
Split splitncnn_16 1 2 925 925_splitncnn_0 925_splitncnn_1
Interp Resize_226 1 1 925_splitncnn_1 930 0=1 1=2.000000e+00 2=2.000000e+00
Concat Concat_227 2 1 930 781_splitncnn_0 931
Split splitncnn_17 1 2 931 931_splitncnn_0 931_splitncnn_1
Convolution Conv_228 1 1 931_splitncnn_1 1342 0=32 1=1 5=1 6=4096
Swish Mul_230 1 1 1342 935
Convolution Conv_231 1 1 931_splitncnn_0 1345 0=32 1=1 5=1 6=4096
Swish Mul_233 1 1 1345 939
Convolution Conv_234 1 1 935 1348 0=32 1=1 5=1 6=1024
Swish Mul_236 1 1 1348 943
ConvolutionDepthWise Conv_237 1 1 943 1351 0=32 1=3 4=1 5=1 6=288 7=32
Swish Mul_239 1 1 1351 947
Convolution Conv_240 1 1 947 1354 0=32 1=1 5=1 6=1024
Swish Mul_242 1 1 1354 951
Concat Concat_243 2 1 951 939 952
Convolution Conv_244 1 1 952 1357 0=64 1=1 5=1 6=4096
Swish Mul_246 1 1 1357 956
Split splitncnn_18 1 2 956 956_splitncnn_0 956_splitncnn_1
ConvolutionDepthWise Conv_247 1 1 956_splitncnn_1 1360 0=64 1=3 3=2 4=1 5=1 6=576 7=64
Swish Mul_249 1 1 1360 960
Convolution Conv_250 1 1 960 1363 0=64 1=1 5=1 6=4096
Swish Mul_252 1 1 1363 964
Concat Concat_253 2 1 964 925_splitncnn_0 965
Split splitncnn_19 1 2 965 965_splitncnn_0 965_splitncnn_1
Convolution Conv_254 1 1 965_splitncnn_1 1366 0=64 1=1 5=1 6=8192
Swish Mul_256 1 1 1366 969
Convolution Conv_257 1 1 965_splitncnn_0 1369 0=64 1=1 5=1 6=8192
Swish Mul_259 1 1 1369 973
Convolution Conv_260 1 1 969 1372 0=64 1=1 5=1 6=4096
Swish Mul_262 1 1 1372 977
ConvolutionDepthWise Conv_263 1 1 977 1375 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_265 1 1 1375 981
Convolution Conv_266 1 1 981 1378 0=64 1=1 5=1 6=4096
Swish Mul_268 1 1 1378 985
Concat Concat_269 2 1 985 973 986
Convolution Conv_270 1 1 986 1381 0=128 1=1 5=1 6=16384
Swish Mul_272 1 1 1381 990
Split splitncnn_20 1 2 990 990_splitncnn_0 990_splitncnn_1
ConvolutionDepthWise Conv_273 1 1 990_splitncnn_1 1384 0=128 1=3 3=2 4=1 5=1 6=1152 7=128
Swish Mul_275 1 1 1384 994
Convolution Conv_276 1 1 994 1387 0=128 1=1 5=1 6=16384
Swish Mul_278 1 1 1387 998
Concat Concat_279 2 1 998 890_splitncnn_0 999
Split splitncnn_21 1 2 999 999_splitncnn_0 999_splitncnn_1
Convolution Conv_280 1 1 999_splitncnn_1 1390 0=128 1=1 5=1 6=32768
Swish Mul_282 1 1 1390 1003
Convolution Conv_283 1 1 999_splitncnn_0 1393 0=128 1=1 5=1 6=32768
Swish Mul_285 1 1 1393 1007
Convolution Conv_286 1 1 1003 1396 0=128 1=1 5=1 6=16384
Swish Mul_288 1 1 1396 1011
ConvolutionDepthWise Conv_289 1 1 1011 1399 0=128 1=3 4=1 5=1 6=1152 7=128
Swish Mul_291 1 1 1399 1015
Convolution Conv_292 1 1 1015 1402 0=128 1=1 5=1 6=16384
Swish Mul_294 1 1 1402 1019
Concat Concat_295 2 1 1019 1007 1020
Convolution Conv_296 1 1 1020 1405 0=256 1=1 5=1 6=65536
Swish Mul_298 1 1 1405 1024
Convolution Conv_299 1 1 956_splitncnn_0 1408 0=64 1=1 5=1 6=4096
Swish Mul_301 1 1 1408 1028
Split splitncnn_22 1 2 1028 1028_splitncnn_0 1028_splitncnn_1
ConvolutionDepthWise Conv_302 1 1 1028_splitncnn_1 1411 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_304 1 1 1411 1032
Convolution Conv_305 1 1 1032 1414 0=64 1=1 5=1 6=4096
Swish Mul_307 1 1 1414 1036
ConvolutionDepthWise Conv_308 1 1 1036 1417 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_310 1 1 1417 1040
Convolution Conv_311 1 1 1040 1420 0=64 1=1 5=1 6=4096
Swish Mul_313 1 1 1420 1044
Convolution Conv_314 1 1 1044 1065 0=34 1=1 5=1 6=2176 9=4
ConvolutionDepthWise Conv_315 1 1 1028_splitncnn_0 1423 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_317 1 1 1423 1049
Convolution Conv_318 1 1 1049 1426 0=64 1=1 5=1 6=4096
Swish Mul_320 1 1 1426 1053
ConvolutionDepthWise Conv_321 1 1 1053 1429 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_323 1 1 1429 1057
Convolution Conv_324 1 1 1057 1432 0=64 1=1 5=1 6=4096
Swish Mul_326 1 1 1432 1061
Split splitncnn_23 1 2 1061 1061_splitncnn_0 1061_splitncnn_1
Convolution Conv_327 1 1 1061_splitncnn_1 1062 0=4 1=1 5=1 6=256
Convolution Conv_328 1 1 1061_splitncnn_0 1064 0=1 1=1 5=1 6=64 9=4
Concat Concat_331 3 1 1062 1064 1065 1066
Convolution Conv_332 1 1 990_splitncnn_0 1435 0=64 1=1 5=1 6=8192
Swish Mul_334 1 1 1435 1070
Split splitncnn_24 1 2 1070 1070_splitncnn_0 1070_splitncnn_1
ConvolutionDepthWise Conv_335 1 1 1070_splitncnn_1 1438 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_337 1 1 1438 1074
Convolution Conv_338 1 1 1074 1441 0=64 1=1 5=1 6=4096
Swish Mul_340 1 1 1441 1078
ConvolutionDepthWise Conv_341 1 1 1078 1444 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_343 1 1 1444 1082
Convolution Conv_344 1 1 1082 1447 0=64 1=1 5=1 6=4096
Swish Mul_346 1 1 1447 1086
Convolution Conv_347 1 1 1086 1107 0=34 1=1 5=1 6=2176 9=4
ConvolutionDepthWise Conv_348 1 1 1070_splitncnn_0 1450 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_350 1 1 1450 1091
Convolution Conv_351 1 1 1091 1453 0=64 1=1 5=1 6=4096
Swish Mul_353 1 1 1453 1095
ConvolutionDepthWise Conv_354 1 1 1095 1456 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_356 1 1 1456 1099
Convolution Conv_357 1 1 1099 1459 0=64 1=1 5=1 6=4096
Swish Mul_359 1 1 1459 1103
Split splitncnn_25 1 2 1103 1103_splitncnn_0 1103_splitncnn_1
Convolution Conv_360 1 1 1103_splitncnn_1 1104 0=4 1=1 5=1 6=256
Convolution Conv_361 1 1 1103_splitncnn_0 1106 0=1 1=1 5=1 6=64 9=4
Concat Concat_364 3 1 1104 1106 1107 1108
Convolution Conv_365 1 1 1024 1462 0=64 1=1 5=1 6=16384
Swish Mul_367 1 1 1462 1112
Split splitncnn_26 1 2 1112 1112_splitncnn_0 1112_splitncnn_1
ConvolutionDepthWise Conv_368 1 1 1112_splitncnn_1 1465 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_370 1 1 1465 1116
Convolution Conv_371 1 1 1116 1468 0=64 1=1 5=1 6=4096
Swish Mul_373 1 1 1468 1120
ConvolutionDepthWise Conv_374 1 1 1120 1471 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_376 1 1 1471 1124
Convolution Conv_377 1 1 1124 1474 0=64 1=1 5=1 6=4096
Swish Mul_379 1 1 1474 1128
Convolution Conv_380 1 1 1128 1149 0=34 1=1 5=1 6=2176 9=4
ConvolutionDepthWise Conv_381 1 1 1112_splitncnn_0 1477 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_383 1 1 1477 1133
Convolution Conv_384 1 1 1133 1480 0=64 1=1 5=1 6=4096
Swish Mul_386 1 1 1480 1137
ConvolutionDepthWise Conv_387 1 1 1137 1483 0=64 1=3 4=1 5=1 6=576 7=64
Swish Mul_389 1 1 1483 1141
Convolution Conv_390 1 1 1141 1486 0=64 1=1 5=1 6=4096
Swish Mul_392 1 1 1486 1145
Split splitncnn_27 1 2 1145 1145_splitncnn_0 1145_splitncnn_1
Convolution Conv_393 1 1 1145_splitncnn_1 1146 0=4 1=1 5=1 6=256
Convolution Conv_394 1 1 1145_splitncnn_0 1148 0=1 1=1 5=1 6=64 9=4
Concat Concat_397 3 1 1146 1148 1149 1150
Reshape Reshape_405 1 1 1066 1158 0=-1 1=39
Reshape Reshape_413 1 1 1108 1166 0=-1 1=39
Reshape Reshape_421 1 1 1150 1174 0=-1 1=39
Concat Concat_422 3 1 1158 1166 1174 1175 0=1
Permute Transpose_423 1 1 1175 output 0=1

96
data/config.json Executable file

@@ -0,0 +1,96 @@
{
"mode": {
"gpuEnable" : 1,
"gpu_id" : 0
},
"num_threads" : 1,
"port" : 8080,
"models" : {
"detect4pointsquare" : "/opt/nvidia/deepstream/deepstream-6.0/sources/gst-plugins/gst-dsexample-OCR/data/wpod/detect_plate",
"character_detection_416_YOLOX": "/opt/nvidia/deepstream/deepstream-6.0/sources/gst-plugins/gst-dsexample-OCR/data/character_detection/character-detection-nano-416-16032022"
},
"threshold": {
"ocr": 0.3,
"nms": 0.3,
"detect": 0.7,
"detect_size": 144.0,
"min_width": 15,
"wh_ratio": 2.0
},
"using_filter_ocr": true,
"save_path": "../data",
"save_img": true,
"save_fail": true,
"color_classify":{
"crop_plate":{
"long":[[10,70], [10,230]],
"square":[[10,150], [10,230]]
},
"red":{
"low":[[0, 70, 80], [10, 255, 255]],
"up":[[150, 70, 80], [180, 255, 255]]
},
"blue_min_thres":0.08,
"blue":[[100, 90, 110], [125, 255, 255]],
"blue_dark":[[100, 70, 80], [130, 100, 120]],
"yellow":[[10, 100, 100], [60, 255, 255]],
"gray": [[0, 0, 70], [180, 80, 255]],
"white":[[0, 0, 0], [10, 120, 255]],
"black": [[0, 0, 0], [180,255,55]],
"black1": [[100, 100, 38], [130,255,50]]
},
"prob_remove_trash": 0.9,
"rules": {
"head": {
"enable": false,
"rule": {
"C": "0", "D": "0",
"F": "", "G": "6", "H": "", "I": "1", "J": "1",
"K": "", "L": "4", "M": "", "N": "",
"P": "", "Q": "0", "R": "", "S": "5", "T": "",
"U": "0", "V": "", "W": "",
"X": "", "Y": "", "Z": "2"
}
},
"mid": {
"enable": false,
"rule": {
"W": "N"
}
},
"tail": {
"enable": false,
"rule": {
"A": "4", "B": "8", "C": "0", "D": "0", "E": "",
"F": "", "G": "6", "H": "", "I": "1", "J": "1",
"K": "", "L": "4", "M": "", "N": "", "O": "0",
"P": "", "Q": "0", "R": "", "S": "5", "T": "",
"U": "0", "V": "", "W": "",
"X": "", "Y": "", "Z": "2"
}
}
},
"save_logs_api": {
"enable": false,
"push_log": {
"url": "http://localhost:4004",
"api_log":"/logs",
"method": "post"
}
},
"os": "windows",
"timeout": 5,
"open_door": {
"enable": 2,
"url": "http://192.168.0.11:2001/connect",
"api_open_door":"/connect",
"in": {
"doorId": "1",
"c3-ip": "192.168.1.204"
},
"out": {
"doorId": "2",
"c3-ip": "192.168.1.206"
}
}
}
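The loader for this file lives in src/config.cpp (listed in the Makefile) and is not shown in this commit view, so the exact parsing code is unknown. As an illustration only, the threshold and mode fields above could be read with a generic JSON library such as nlohmann::json; the library choice and file path here are assumptions, not the repository's code.

// Illustrative sketch, not the repository's loader. Assumes nlohmann::json.
#include <fstream>
#include <iostream>
#include <nlohmann/json.hpp>

int main ()
{
  std::ifstream f ("data/config.json");
  nlohmann::json cfg = nlohmann::json::parse (f);

  float ocr_thresh = cfg["threshold"]["ocr"].get<float> ();
  float nms_thresh = cfg["threshold"]["nms"].get<float> ();
  float det_thresh = cfg["threshold"]["detect"].get<float> ();
  bool  gpu_enable = cfg["mode"]["gpuEnable"].get<int> () == 1;

  std::cout << "ocr=" << ocr_thresh << " nms=" << nms_thresh
            << " detect=" << det_thresh << " gpu=" << gpu_enable << std::endl;
  return 0;
}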

BIN
data/wpod/detect_plate.bin Executable file

Binary file not shown.

102
data/wpod/detect_plate.param Executable file

@@ -0,0 +1,102 @@
7767517
100 110
Input input 0 1 input_blob 0=-1 1=-1 2=3
Convolution conv2d_1 1 1 input_blob conv2d_1_blob 0=16 1=3 2=1 3=1 4=-233 5=1 6=432 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_1 1 1 conv2d_1_blob batch_normalization_1_blob 0=16 1=1.000000e-03
ReLU activation_1 1 1 batch_normalization_1_blob activation_1_blob 0=0.000000e+00 1=0
Convolution conv2d_2 1 1 activation_1_blob conv2d_2_blob 0=16 1=3 2=1 3=1 4=-233 5=1 6=2304 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_2 1 1 conv2d_2_blob batch_normalization_2_blob 0=16 1=1.000000e-03
ReLU activation_2 1 1 batch_normalization_2_blob activation_2_blob 0=0.000000e+00 1=0
Pooling max_pooling2d_1 1 1 activation_2_blob max_pooling2d_1_blob 0=0 1=2 11=2 2=2 12=2 3=0 4=0 5=1
Convolution conv2d_3 1 1 max_pooling2d_1_blob conv2d_3_blob 0=32 1=3 2=1 3=1 4=-233 5=1 6=4608 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_3 1 1 conv2d_3_blob batch_normalization_3_blob 0=32 1=1.000000e-03
ReLU activation_3 1 1 batch_normalization_3_blob activation_3_blob 0=0.000000e+00 1=0
Split activation_3_Split 1 2 activation_3_blob activation_3_Split_blob_idx_0 activation_3_Split_blob_idx_1
Convolution conv2d_4 1 1 activation_3_Split_blob_idx_0 conv2d_4_blob 0=32 1=3 2=1 3=1 4=-233 5=1 6=9216 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_4 1 1 conv2d_4_blob batch_normalization_4_blob 0=32 1=1.000000e-03
ReLU activation_4 1 1 batch_normalization_4_blob activation_4_blob 0=0.000000e+00 1=0
Convolution conv2d_5 1 1 activation_4_blob conv2d_5_blob 0=32 1=3 2=1 3=1 4=-233 5=1 6=9216 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_5 1 1 conv2d_5_blob batch_normalization_5_blob 0=32 1=1.000000e-03
BinaryOp add_1 2 1 batch_normalization_5_blob activation_3_Split_blob_idx_1 add_1_blob 0=0 1=0 2=0.000000e+00
ReLU activation_5 1 1 add_1_blob activation_5_blob 0=0.000000e+00 1=0
Pooling max_pooling2d_2 1 1 activation_5_blob max_pooling2d_2_blob 0=0 1=2 11=2 2=2 12=2 3=0 4=0 5=1
Convolution conv2d_6 1 1 max_pooling2d_2_blob conv2d_6_blob 0=64 1=3 2=1 3=1 4=-233 5=1 6=18432 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_6 1 1 conv2d_6_blob batch_normalization_6_blob 0=64 1=1.000000e-03
ReLU activation_6 1 1 batch_normalization_6_blob activation_6_blob 0=0.000000e+00 1=0
Split activation_6_Split 1 2 activation_6_blob activation_6_Split_blob_idx_0 activation_6_Split_blob_idx_1
Convolution conv2d_7 1 1 activation_6_Split_blob_idx_0 conv2d_7_blob 0=64 1=3 2=1 3=1 4=-233 5=1 6=36864 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_7 1 1 conv2d_7_blob batch_normalization_7_blob 0=64 1=1.000000e-03
ReLU activation_7 1 1 batch_normalization_7_blob activation_7_blob 0=0.000000e+00 1=0
Convolution conv2d_8 1 1 activation_7_blob conv2d_8_blob 0=64 1=3 2=1 3=1 4=-233 5=1 6=36864 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_8 1 1 conv2d_8_blob batch_normalization_8_blob 0=64 1=1.000000e-03
BinaryOp add_2 2 1 batch_normalization_8_blob activation_6_Split_blob_idx_1 add_2_blob 0=0 1=0 2=0.000000e+00
ReLU activation_8 1 1 add_2_blob activation_8_blob 0=0.000000e+00 1=0
Split activation_8_Split 1 2 activation_8_blob activation_8_Split_blob_idx_0 activation_8_Split_blob_idx_1
Convolution conv2d_9 1 1 activation_8_Split_blob_idx_0 conv2d_9_blob 0=64 1=3 2=1 3=1 4=-233 5=1 6=36864 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_9 1 1 conv2d_9_blob batch_normalization_9_blob 0=64 1=1.000000e-03
ReLU activation_9 1 1 batch_normalization_9_blob activation_9_blob 0=0.000000e+00 1=0
Convolution conv2d_10 1 1 activation_9_blob conv2d_10_blob 0=64 1=3 2=1 3=1 4=-233 5=1 6=36864 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_10 1 1 conv2d_10_blob batch_normalization_10_blob 0=64 1=1.000000e-03
BinaryOp add_3 2 1 batch_normalization_10_blob activation_8_Split_blob_idx_1 add_3_blob 0=0 1=0 2=0.000000e+00
ReLU activation_10 1 1 add_3_blob activation_10_blob 0=0.000000e+00 1=0
Pooling max_pooling2d_3 1 1 activation_10_blob max_pooling2d_3_blob 0=0 1=2 11=2 2=2 12=2 3=0 4=0 5=1
Convolution conv2d_11 1 1 max_pooling2d_3_blob conv2d_11_blob 0=64 1=3 2=1 3=1 4=-233 5=1 6=36864 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_11 1 1 conv2d_11_blob batch_normalization_11_blob 0=64 1=1.000000e-03
ReLU activation_11 1 1 batch_normalization_11_blob activation_11_blob 0=0.000000e+00 1=0
Split activation_11_Split 1 2 activation_11_blob activation_11_Split_blob_idx_0 activation_11_Split_blob_idx_1
Convolution conv2d_12 1 1 activation_11_Split_blob_idx_0 conv2d_12_blob 0=64 1=3 2=1 3=1 4=-233 5=1 6=36864 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_12 1 1 conv2d_12_blob batch_normalization_12_blob 0=64 1=1.000000e-03
ReLU activation_12 1 1 batch_normalization_12_blob activation_12_blob 0=0.000000e+00 1=0
Convolution conv2d_13 1 1 activation_12_blob conv2d_13_blob 0=64 1=3 2=1 3=1 4=-233 5=1 6=36864 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_13 1 1 conv2d_13_blob batch_normalization_13_blob 0=64 1=1.000000e-03
BinaryOp add_4 2 1 batch_normalization_13_blob activation_11_Split_blob_idx_1 add_4_blob 0=0 1=0 2=0.000000e+00
ReLU activation_13 1 1 add_4_blob activation_13_blob 0=0.000000e+00 1=0
Split activation_13_Split 1 2 activation_13_blob activation_13_Split_blob_idx_0 activation_13_Split_blob_idx_1
Convolution conv2d_14 1 1 activation_13_Split_blob_idx_0 conv2d_14_blob 0=64 1=3 2=1 3=1 4=-233 5=1 6=36864 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_14 1 1 conv2d_14_blob batch_normalization_14_blob 0=64 1=1.000000e-03
ReLU activation_14 1 1 batch_normalization_14_blob activation_14_blob 0=0.000000e+00 1=0
Convolution conv2d_15 1 1 activation_14_blob conv2d_15_blob 0=64 1=3 2=1 3=1 4=-233 5=1 6=36864 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_15 1 1 conv2d_15_blob batch_normalization_15_blob 0=64 1=1.000000e-03
BinaryOp add_5 2 1 batch_normalization_15_blob activation_13_Split_blob_idx_1 add_5_blob 0=0 1=0 2=0.000000e+00
ReLU activation_15 1 1 add_5_blob activation_15_blob 0=0.000000e+00 1=0
Pooling max_pooling2d_4 1 1 activation_15_blob max_pooling2d_4_blob 0=0 1=2 11=2 2=2 12=2 3=0 4=0 5=1
Convolution conv2d_16 1 1 max_pooling2d_4_blob conv2d_16_blob 0=128 1=3 2=1 3=1 4=-233 5=1 6=73728 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_16 1 1 conv2d_16_blob batch_normalization_16_blob 0=128 1=1.000000e-03
ReLU activation_16 1 1 batch_normalization_16_blob activation_16_blob 0=0.000000e+00 1=0
Split activation_16_Split 1 2 activation_16_blob activation_16_Split_blob_idx_0 activation_16_Split_blob_idx_1
Convolution conv2d_17 1 1 activation_16_Split_blob_idx_0 conv2d_17_blob 0=128 1=3 2=1 3=1 4=-233 5=1 6=147456 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_17 1 1 conv2d_17_blob batch_normalization_17_blob 0=128 1=1.000000e-03
ReLU activation_17 1 1 batch_normalization_17_blob activation_17_blob 0=0.000000e+00 1=0
Convolution conv2d_18 1 1 activation_17_blob conv2d_18_blob 0=128 1=3 2=1 3=1 4=-233 5=1 6=147456 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_18 1 1 conv2d_18_blob batch_normalization_18_blob 0=128 1=1.000000e-03
BinaryOp add_6 2 1 batch_normalization_18_blob activation_16_Split_blob_idx_1 add_6_blob 0=0 1=0 2=0.000000e+00
ReLU activation_18 1 1 add_6_blob activation_18_blob 0=0.000000e+00 1=0
Split activation_18_Split 1 2 activation_18_blob activation_18_Split_blob_idx_0 activation_18_Split_blob_idx_1
Convolution conv2d_19 1 1 activation_18_Split_blob_idx_0 conv2d_19_blob 0=128 1=3 2=1 3=1 4=-233 5=1 6=147456 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_19 1 1 conv2d_19_blob batch_normalization_19_blob 0=128 1=1.000000e-03
ReLU activation_19 1 1 batch_normalization_19_blob activation_19_blob 0=0.000000e+00 1=0
Convolution conv2d_20 1 1 activation_19_blob conv2d_20_blob 0=128 1=3 2=1 3=1 4=-233 5=1 6=147456 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_20 1 1 conv2d_20_blob batch_normalization_20_blob 0=128 1=1.000000e-03
BinaryOp add_7 2 1 batch_normalization_20_blob activation_18_Split_blob_idx_1 add_7_blob 0=0 1=0 2=0.000000e+00
ReLU activation_20 1 1 add_7_blob activation_20_blob 0=0.000000e+00 1=0
Split activation_20_Split 1 2 activation_20_blob activation_20_Split_blob_idx_0 activation_20_Split_blob_idx_1
Convolution conv2d_21 1 1 activation_20_Split_blob_idx_0 conv2d_21_blob 0=128 1=3 2=1 3=1 4=-233 5=1 6=147456 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_21 1 1 conv2d_21_blob batch_normalization_21_blob 0=128 1=1.000000e-03
ReLU activation_21 1 1 batch_normalization_21_blob activation_21_blob 0=0.000000e+00 1=0
Convolution conv2d_22 1 1 activation_21_blob conv2d_22_blob 0=128 1=3 2=1 3=1 4=-233 5=1 6=147456 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_22 1 1 conv2d_22_blob batch_normalization_22_blob 0=128 1=1.000000e-03
BinaryOp add_8 2 1 batch_normalization_22_blob activation_20_Split_blob_idx_1 add_8_blob 0=0 1=0 2=0.000000e+00
ReLU activation_22 1 1 add_8_blob activation_22_blob 0=0.000000e+00 1=0
Split activation_22_Split 1 2 activation_22_blob activation_22_Split_blob_idx_0 activation_22_Split_blob_idx_1
Convolution conv2d_23 1 1 activation_22_Split_blob_idx_0 conv2d_23_blob 0=128 1=3 2=1 3=1 4=-233 5=1 6=147456 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_23 1 1 conv2d_23_blob batch_normalization_23_blob 0=128 1=1.000000e-03
ReLU activation_23 1 1 batch_normalization_23_blob activation_23_blob 0=0.000000e+00 1=0
Convolution conv2d_24 1 1 activation_23_blob conv2d_24_blob 0=128 1=3 2=1 3=1 4=-233 5=1 6=147456 9=0 11=3 12=1 13=1
BatchNorm batch_normalization_24 1 1 conv2d_24_blob batch_normalization_24_blob 0=128 1=1.000000e-03
BinaryOp add_9 2 1 batch_normalization_24_blob activation_22_Split_blob_idx_1 add_9_blob 0=0 1=0 2=0.000000e+00
ReLU activation_24 1 1 add_9_blob activation_24_blob 0=0.000000e+00 1=0
Split activation_24_Split 1 2 activation_24_blob activation_24_Split_blob_idx_0 activation_24_Split_blob_idx_1
Convolution conv2d_25 1 1 activation_24_Split_blob_idx_0 conv2d_25_blob 0=2 1=3 2=1 3=1 4=-233 5=1 6=2304 9=0 11=3 12=1 13=1
Softmax conv2d_25_Softmax 1 1 conv2d_25_blob conv2d_25_Softmax_blob 0=0
Convolution conv2d_26 1 1 activation_24_Split_blob_idx_1 conv2d_26_blob 0=6 1=3 2=1 3=1 4=-233 5=1 6=6912 9=0 11=3 12=1 13=1
Concat concatenate_1 2 1 conv2d_26_blob conv2d_25_Softmax_blob concatenate_1_blob 0=0

25
dsexample_lib/Makefile Executable file

@@ -0,0 +1,25 @@
################################################################################
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#################################################################################
all:
gcc -ggdb -c -o dsexample_lib.o -fPIC dsexample_lib.c
ar rcs libdsexample.a dsexample_lib.o

94
dsexample_lib/dsexample_lib.c Executable file

@@ -0,0 +1,94 @@
/**
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "dsexample_lib.h"
#include <stdio.h>
#include <stdlib.h>
struct DsExampleCtx
{
DsExampleInitParams initParams;
};
DsExampleCtx *
DsExampleCtxInit (DsExampleInitParams * initParams)
{
DsExampleCtx *ctx = (DsExampleCtx *) calloc (1, sizeof (DsExampleCtx));
ctx->initParams = *initParams;
return ctx;
}
// In case of an actual processing library, processing on the data will be completed
// in this function and the output will be returned
DsExampleOutput *
DsExampleProcess (DsExampleCtx * ctx, unsigned char *data)
{
DsExampleOutput *out =
(DsExampleOutput*)calloc (1, sizeof (DsExampleOutput));
if (data != NULL)
{
// Process your data here
}
// Fill output structure using processed output
// Here, we fake some detected objects and labels
if (ctx->initParams.fullFrame)
{
out->numObjects = 2;
out->object[0] = (DsExampleObject)
{
(float)(ctx->initParams.processingWidth) / 8,
(float)(ctx->initParams.processingHeight) / 8,
(float)(ctx->initParams.processingWidth) / 8,
(float)(ctx->initParams.processingHeight) / 8, "Obj0"
};
out->object[1] = (DsExampleObject)
{
(float)(ctx->initParams.processingWidth) / 2,
(float)(ctx->initParams.processingHeight) / 2,
(float)(ctx->initParams.processingWidth) / 8,
(float)(ctx->initParams.processingHeight) / 8, "Obj1"
};
}
else
{
out->numObjects = 1;
out->object[0] = (DsExampleObject)
{
(float)(ctx->initParams.processingWidth) / 8,
(float)(ctx->initParams.processingHeight) / 8,
(float)(ctx->initParams.processingWidth) / 8,
(float)(ctx->initParams.processingHeight) / 8, ""
};
// Set the object label
snprintf (out->object[0].label, 64, "Obj_label");
}
return out;
}
void
DsExampleCtxDeinit (DsExampleCtx * ctx)
{
free (ctx);
}

74
dsexample_lib/dsexample_lib.h Executable file

@@ -0,0 +1,74 @@
/**
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __DSEXAMPLE_LIB__
#define __DSEXAMPLE_LIB__
#define MAX_LABEL_SIZE 128
#ifdef __cplusplus
extern "C" {
#endif
typedef struct DsExampleCtx DsExampleCtx;
// Init parameters structure as input, required for instantiating dsexample_lib
typedef struct
{
// Width at which frame/object will be scaled
int processingWidth;
// height at which frame/object will be scaled
int processingHeight;
// Flag to indicate whether operating on crops of full frame
int fullFrame;
} DsExampleInitParams;
// Detected/Labelled object structure, stores bounding box info along with label
typedef struct
{
float left;
float top;
float width;
float height;
char label[MAX_LABEL_SIZE];
} DsExampleObject;
// Output data returned after processing
typedef struct
{
int numObjects;
DsExampleObject object[4];
} DsExampleOutput;
// Initialize library context
DsExampleCtx * DsExampleCtxInit (DsExampleInitParams *init_params);
// Dequeue processed output
DsExampleOutput *DsExampleProcess (DsExampleCtx *ctx, unsigned char *data);
// Deinitialize library context
void DsExampleCtxDeinit (DsExampleCtx *ctx);
#ifdef __cplusplus
}
#endif
#endif
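Taken together with dsexample_lib.c above, the intended call sequence is init, process, free the returned output, then deinit. A small self-contained sketch using only the three functions declared in this header; the dummy RGBA buffer is a placeholder (the plugin itself passes converted frame data).

// Usage sketch for dsexample_lib; only the API declared above is used.
#include <cstdio>
#include <cstdlib>
#include <vector>
#include "dsexample_lib/dsexample_lib.h"

int main ()
{
  DsExampleInitParams params = { 640, 480, 1 /* fullFrame */ };
  DsExampleCtx *ctx = DsExampleCtxInit (&params);

  std::vector<unsigned char> dummy_frame (640 * 480 * 4, 0);   // RGBA placeholder
  DsExampleOutput *out = DsExampleProcess (ctx, dummy_frame.data ());

  for (int i = 0; i < out->numObjects; i++)
    printf ("%s: %.0fx%.0f at (%.0f,%.0f)\n", out->object[i].label,
        out->object[i].width, out->object[i].height,
        out->object[i].left, out->object[i].top);

  free (out);               // DsExampleProcess allocates the output with calloc
  DsExampleCtxDeinit (ctx);
  return 0;
}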

Binary file not shown.

Binary file not shown.

1198
gstdsexample.cpp Executable file

File diff suppressed because it is too large.

126
gstdsexample.h Executable file

@@ -0,0 +1,126 @@
/**
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __GST_DSEXAMPLE_H__
#define __GST_DSEXAMPLE_H__
#include <gst/base/gstbasetransform.h>
#include <gst/video/video.h>
/* Open CV headers */
#ifdef WITH_OPENCV
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#endif
#include <cuda.h>
#include <cuda_runtime.h>
#include "nvbufsurface.h"
#include "nvbufsurftransform.h"
#include "gst-nvquery.h"
#include "gstnvdsmeta.h"
#include "dsexample_lib/dsexample_lib.h"
/* Package and library details required for plugin_init */
#define PACKAGE "dsexample"
#define VERSION "1.0"
#define LICENSE "Proprietary"
#define DESCRIPTION "NVIDIA example plugin for integration with DeepStream on DGPU"
#define BINARY_PACKAGE "NVIDIA DeepStream 3rdparty IP integration example plugin"
#define URL "http://nvidia.com/"
G_BEGIN_DECLS
/* Standard boilerplate stuff */
typedef struct _GstDsExample GstDsExample;
typedef struct _GstDsExampleClass GstDsExampleClass;
/* Standard boilerplate stuff */
#define GST_TYPE_DSEXAMPLE (gst_dsexample_get_type())
#define GST_DSEXAMPLE(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_DSEXAMPLE,GstDsExample))
#define GST_DSEXAMPLE_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_DSEXAMPLE,GstDsExampleClass))
#define GST_DSEXAMPLE_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj), GST_TYPE_DSEXAMPLE, GstDsExampleClass))
#define GST_IS_DSEXAMPLE(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_DSEXAMPLE))
#define GST_IS_DSEXAMPLE_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_DSEXAMPLE))
#define GST_DSEXAMPLE_CAST(obj) ((GstDsExample *)(obj))
struct _GstDsExample
{
GstBaseTransform base_trans;
// Context of the custom algorithm library
DsExampleCtx *dsexamplelib_ctx;
// Unique ID of the element. The labels generated by the element will be
// updated at index `unique_id` of attr_info array in NvDsObjectParams.
guint unique_id;
// Frame number of the current input buffer
guint64 frame_num;
// CUDA Stream used for allocating the CUDA task
cudaStream_t cuda_stream;
// Host buffer to store RGB data for use by algorithm
void *host_rgb_buf;
// The intermediate scratch buffer for RGBA conversions
NvBufSurface *inter_buf;
#ifdef WITH_OPENCV
// OpenCV mat containing RGB data
cv::Mat *cvmat;
#endif
// Input video info (resolution, color format, framerate, etc)
GstVideoInfo video_info;
// Resolution at which frames/objects should be processed
gint processing_width;
gint processing_height;
// Flag which defines iGPU/dGPU
guint is_integrated;
// Amount of objects processed in single call to algorithm
guint batch_size;
// GPU ID on which we expect to execute the task
guint gpu_id;
// Boolean indicating if entire frame or cropped objects should be processed
gboolean process_full_frame;
// Boolean indicating if to blur the detected objects
gboolean blur_objects;
};
// Boiler plate stuff
struct _GstDsExampleClass
{
GstBaseTransformClass parent_class;
};
GType gst_dsexample_get_type (void);
G_END_DECLS
#endif /* __GST_DSEXAMPLE_H__ */

BIN
gstdsexample.o Normal file

Binary file not shown.

1301
gstdsexample_optimized.cpp Executable file

File diff suppressed because it is too large.

215
gstdsexample_optimized.h Executable file

@@ -0,0 +1,215 @@
/**
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __GST_DSEXAMPLE_H__
#define __GST_DSEXAMPLE_H__
#include <gst/base/gstbasetransform.h>
#include <gst/video/video.h>
/* Open CV headers */
#ifdef WITH_OPENCV
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#endif
#include <cuda.h>
#include <cuda_runtime.h>
#include "nvbufsurface.h"
#include "nvbufsurftransform.h"
#include "gst-nvquery.h"
#include "gstnvdsmeta.h"
#include "dsexample_lib/dsexample_lib.h"
#include "nvtx3/nvToolsExt.h"
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>
/* Package and library details required for plugin_init */
#define PACKAGE "dsexample"
#define VERSION "1.0"
#define LICENSE "Proprietary"
#define DESCRIPTION "NVIDIA example plugin for integration with DeepStream on DGPU/Jetson"
#define BINARY_PACKAGE "NVIDIA DeepStream 3rdparty IP integration example plugin"
#define URL "http://nvidia.com/"
G_BEGIN_DECLS
/* Standard boilerplate stuff */
typedef struct _GstDsExample GstDsExample;
typedef struct _GstDsExampleClass GstDsExampleClass;
/* Standard boilerplate stuff */
#define GST_TYPE_DSEXAMPLE (gst_dsexample_get_type())
#define GST_DSEXAMPLE(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_DSEXAMPLE,GstDsExample))
#define GST_DSEXAMPLE_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_DSEXAMPLE,GstDsExampleClass))
#define GST_DSEXAMPLE_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS((obj), GST_TYPE_DSEXAMPLE, GstDsExampleClass))
#define GST_IS_DSEXAMPLE(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_DSEXAMPLE))
#define GST_IS_DSEXAMPLE_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_DSEXAMPLE))
#define GST_DSEXAMPLE_CAST(obj) ((GstDsExample *)(obj))
/** Maximum batch size to be supported by dsexample. */
#define NVDSEXAMPLE_MAX_BATCH_SIZE G_MAXUINT
struct _GstDsExample
{
GstBaseTransform base_trans;
/** Context of the custom algorithm library */
DsExampleCtx *dsexamplelib_ctx;
/** Processing Queue and related synchronization structures. */
/** GMutex lock guarding shared access from multiple threads. **/
GMutex process_lock;
/** Queue to send data to the output thread for processing. **/
GQueue *process_queue;
/** GCond for the process queue. **/
GCond process_cond;
/** Queue to receive processed data back from the output thread. **/
GQueue *buf_queue;
/** GCond for the buf queue. **/
GCond buf_cond;
/** Output thread. */
GThread *process_thread;
/** Boolean to signal output thread to stop. */
gboolean stop;
/** Unique ID of the element. Used to identify metadata
* generated by this element. */
guint unique_id;
/** Frame number of the current input buffer */
guint64 frame_num;
/** CUDA Stream used for allocating the CUDA task */
cudaStream_t cuda_stream;
/** Temporary NvBufSurface for batched transformations. */
NvBufSurface batch_insurf;
/** The intermediate scratch buffer for RGBA conversions. */
NvBufSurface *inter_buf;
/** Input video info (resolution, color format, framerate, etc) */
GstVideoInfo video_info;
/** Resolution at which frames/objects should be processed */
gint processing_width;
gint processing_height;
/** Maximum batch size. */
guint max_batch_size;
/** GPU ID on which we expect to execute the task */
guint gpu_id;
/** Boolean indicating if entire frame or cropped objects should be processed */
gboolean process_full_frame;
/** Current batch number of the input batch. */
gulong current_batch_num;
/** GstFlowReturn returned by the latest buffer pad push. */
GstFlowReturn last_flow_ret;
/** Config params required by NvBufSurfTransform API. */
NvBufSurfTransformConfigParams transform_config_params;
/** Parameters to use for transforming buffers. */
NvBufSurfTransformParams transform_params;
/** NVTX Domain. */
nvtxDomainHandle_t nvtx_domain;
};
typedef struct
{
/** Ratio by which the frame / object crop was scaled in the horizontal
* direction. Required when scaling co-ordinates/sizes in metadata
* back to input resolution. */
gdouble scale_ratio_x = 0.0;
/** Ratio by which the frame / object crop was scaled in the vertical
* direction. Required when scaling co-ordinates/sizes in metadata
* back to input resolution. */
gdouble scale_ratio_y = 0.0;
/** NvDsObjectParams belonging to the object to be classified. */
NvDsObjectMeta *obj_meta = nullptr;
NvDsFrameMeta *frame_meta = nullptr;
/** Index of the frame in the batched input GstBuffer. Not required for
* classifiers. */
guint batch_index = 0;
/** Frame number of the frame from the source. */
gulong frame_num = 0;
/** The buffer structure the object / frame was converted from. */
NvBufSurfaceParams *input_surf_params = nullptr;
} GstDsExampleFrame;
/**
* Holds information about the batch of frames to be inferred.
*/
typedef struct
{
/** Vector of frames in the batch. */
std::vector < GstDsExampleFrame > frames;
/** Pointer to the input GstBuffer. */
GstBuffer *inbuf = nullptr;
/** Batch number of the input batch. */
gulong inbuf_batch_num = 0;
/** Boolean indicating that the output thread should only push the buffer to
* downstream element. If set to true, a corresponding batch has not been
* queued at the input of NvDsExampleContext and hence dequeuing of output is
* not required. */
gboolean push_buffer = FALSE;
/** Boolean marking this batch as an event marker. This is only used for
* synchronization. The output loop does not process on the batch.
*/
gboolean event_marker = FALSE;
#ifdef WITH_OPENCV
/** OpenCV mat containing RGB data */
cv::Mat * cvmat;
#else
NvBufSurface *inter_buf;
#endif
nvtxRangeId_t nvtx_complete_buf_range = 0;
} GstDsExampleBatch;
/** Boiler plate stuff */
struct _GstDsExampleClass
{
GstBaseTransformClass parent_class;
};
GType gst_dsexample_get_type (void);
G_END_DECLS
#endif /* __GST_DSEXAMPLE_H__ */
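The members above describe the batching architecture: the transform function queues GstDsExampleBatch items on process_queue while a dedicated output thread drains them, with process_lock/process_cond for synchronization. The sketch below shows that producer/consumer handshake with the same GLib primitives; it is a simplified illustration, not the plugin's actual code (which is in gstdsexample_optimized.cpp, suppressed above).

// Simplified producer/consumer handshake mirroring the members above.
// The real batch type and processing steps are omitted; illustrative only.
#include <glib.h>

typedef struct {
  GMutex   lock;       /* cf. process_lock   */
  GCond    cond;       /* cf. process_cond   */
  GQueue  *queue;      /* cf. process_queue  */
  gboolean stop;       /* cf. stop           */
} ProcQueue;

static void
proc_queue_init (ProcQueue *q)
{
  g_mutex_init (&q->lock);
  g_cond_init (&q->cond);
  q->queue = g_queue_new ();
  q->stop = FALSE;
}

static void
proc_queue_push (ProcQueue *q, gpointer batch)   /* called from the transform function */
{
  g_mutex_lock (&q->lock);
  g_queue_push_tail (q->queue, batch);
  g_cond_broadcast (&q->cond);                   /* wake the output thread */
  g_mutex_unlock (&q->lock);
}

static gpointer
output_loop (gpointer data)                      /* spawned via g_thread_new() */
{
  ProcQueue *q = (ProcQueue *) data;
  g_mutex_lock (&q->lock);
  while (!q->stop) {
    while (g_queue_is_empty (q->queue) && !q->stop)
      g_cond_wait (&q->cond, &q->lock);
    gpointer batch = g_queue_pop_head (q->queue);
    g_mutex_unlock (&q->lock);
    if (batch) { /* run inference, attach metadata, push the buffer downstream */ }
    g_mutex_lock (&q->lock);
  }
  g_mutex_unlock (&q->lock);
  return NULL;
}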


@@ -0,0 +1,92 @@
#pragma once
#include "layer.h"
#include "net.h"
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <float.h>
#include <stdio.h>
#include <vector>
#include <time.h>
#include "algorithm"
#include "vector"
#include <fstream>
#include <regex>
#include "config.h"
class YoloV5FocusCharacterDetection : public ncnn::Layer
{
public:
YoloV5FocusCharacterDetection()
{
one_blob_only = true;
}
virtual int forward(const ncnn::Mat& bottom_blob, ncnn::Mat& top_blob, const ncnn::Option& opt) const
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int outw = w / 2;
int outh = h / 2;
int outc = channels * 4;
top_blob.create(outw, outh, outc, 4u, 1, opt.blob_allocator);
if (top_blob.empty())
return -100;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outc; p++)
{
const float* ptr = bottom_blob.channel(p % channels).row((p / channels) % 2) + ((p / channels) / 2);
float* outptr = top_blob.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
*outptr = *ptr;
outptr += 1;
ptr += 2;
}
ptr += w;
}
}
return 0;
}
};
class CharacterDetector{
public:
CharacterDetector();
inline float intersection_area(const Object& a, const Object& b);
void qsort_descent_inplace(std::vector<Object>& faceobjects, int left, int right);
void qsort_descent_inplace(std::vector<Object>& objects);
void nms_sorted_bboxes(const std::vector<Object>& faceobjects, std::vector<int>& picked, float nms_threshold);
void generate_grids_and_stride(const int target_size, std::vector<int>& strides, std::vector<GridAndStride>& grid_strides);
void generate_yolox_proposals(std::vector<GridAndStride> grid_strides, const ncnn::Mat& feat_blob, float prob_threshold, std::vector<Object>& objects);
std::vector<Character> nms(std::vector<Character> &chars);
std::string cluster(std::vector<Character> &chars, std::string mode, bool& isModeCorrect);
bool isOverlaped(cv::Rect rect1, cv::Rect rect2);
std::vector<std::string> splitOCR(std::string rawOcr);
std::vector<std::string> splitString(const std::string& s, const std::string& c);
std::vector<ObjectDetect> Detect(cv::Mat frame);
std::string extract(cv::Mat rawPlate, std::string mode);
private:
float minWidth = 15;
int useRuleHead = 0;
int useRuleMid = 0;
int useRuleTail = 0;
ncnn::Net net;
float yolox_nms_thresh = 0.3;
float yolox_conf_thresh = 0.3;
int yolox_target_size = 416;
};
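The YoloV5FocusCharacterDetection layer above re-implements the YOLOv5 "Focus" slice (a 2x2 space-to-depth rearrangement) so that ncnn can execute the exported graph, whose first layer is of type YoloV5Focus in the .param file included in this commit. The repository's loading code (src/CharacterDetection.cpp) is not shown; the sketch below follows the usual ncnn custom-layer registration pattern, and the include path for this header is an assumption.

// Registration sketch following the standard ncnn custom-layer pattern.
#include "net.h"
#include "layer.h"
#include "CharacterDetection.h"   // header shown above; exact path assumed

DEFINE_LAYER_CREATOR(YoloV5FocusCharacterDetection)

static int
load_character_detector (ncnn::Net &net, const char *param_path, const char *bin_path)
{
  // The type string must match the layer type in the .param file ("YoloV5Focus").
  net.register_custom_layer ("YoloV5Focus",
      YoloV5FocusCharacterDetection_layer_creator);

  if (net.load_param (param_path) != 0)
    return -1;
  if (net.load_model (bin_path) != 0)
    return -1;
  return 0;
}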

70
include/CppThread.h Executable file

@@ -0,0 +1,70 @@
#ifndef __CPP_THREAD_H_
#define __CPP_THREAD_H_
/**
* GNU GENERAL PUBLIC LICENSE
* Version 3, 29 June 2007
*
* (C) 2020-2021, Bernd Porr <mail@bernporr.me.uk>
**/
#include <thread>
#include <chrono>
/**
* A thin wrapper around the C++ thread model to avoid
* a static callback. Instead just inherit this class
* and overload run() which then runs in this thread.
* This is header-only so that it can be performed
* inline for max performance.
**/
class CppThread {
public:
/**
* Starts the thread.
**/
inline void start() {
isRun = true;
if (mthread) return;
mthread=new std::thread([this]{this->run();});
}
inline void sleep(int time) {
std::this_thread::sleep_for(std::chrono::milliseconds(time));
}
/**
* Waits for the thread to terminate.
**/
inline void join() {
    if (nullptr != mthread) {
        mthread->join();
        delete mthread;   // start() allocated the thread with new
        mthread = nullptr;
    }
}
inline void stop() {
    // Signal run() to finish and wait for it; deleting a still-running
    // std::thread would call std::terminate().
    isRun = false;
    join();
}
inline bool isRunning() {
return this->isRun;
}
protected:
/**
* This method does all the work of this thread.
* Overload this abstract function with
* a real one doing the actual work of this thread.
**/
virtual void run() = 0;
private:
bool isRun = false;
// pointer to the thread
std::thread *mthread=NULL;
};
#endif
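Usage is exactly what the header comments describe: derive from CppThread, override run(), then drive the object with start() and join(). A tiny self-contained example using only what is declared above:

// CppThread usage sketch; relies only on the header above.
#include <cstdio>
#include "CppThread.h"

class Worker : public CppThread {
protected:
  void run () override {
    for (int i = 0; i < 3 && isRunning (); i++) {
      printf ("tick %d\n", i);
      sleep (100);                 // CppThread::sleep() takes milliseconds
    }
  }
};

int main ()
{
  Worker w;
  w.start ();                      // spawns the std::thread and calls run()
  w.join ();                       // waits for run() to return
  return 0;
}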

59
include/CropLP.h Normal file

@@ -0,0 +1,59 @@
#include <iostream>
#include "structs.h"
#include <chrono>
#include "cuda_runtime_api.h"
#include "CropLP_logging.h"
#include "CropLP_common.hpp"
#include <math.h>
#define DEVICE 0
#define NET s // s m l x
#define NETSTRUCT(str) createEngine_##str
#define CREATENET(net) NETSTRUCT(net)
#define STR1(x) #x
#define STR2(x) STR1(x)
// #define USE_FP16 // uncomment this to use FP16 instead of FP32
#define CONF_THRESH 0.51
#define BATCH_SIZE 1
// stuff we know about the network and the input/output blobs
static const int INPUT_H_UNET = 224;
static const int INPUT_W_UNET = 224;
static const int OUTPUT_SIZE_UNET = 224*224;
using namespace nvinfer1;
static Logger gLogger;
struct DetectionCropLP{
float mask[INPUT_W_UNET * INPUT_H_UNET * 1];
};
class CropLP{
private:
std::string model_path = "/home/thai/py_script/MTA/unet-segment/unet/license_ckpt_crop/22-0.9823-0.0604.trt";
char *trtModelStream{nullptr};
size_t size{0};
IRuntime* runtime{nullptr};
ICudaEngine* engine{nullptr};
IExecutionContext* context{nullptr};
const char* INPUT_BLOB_NAME_UNET = "input_1";
const char* OUTPUT_BLOB_NAME_UNET = "conv2d_23";
public:
static CropLP &getInst()
{
static CropLP instance;
return instance;
};
CropLP();
void _init();
float eculidDistance(cv::Point p1, cv::Point p2);
void validate_point(cv::Point2f &tl, cv::Point2f &tr, cv::Point2f &br, cv::Point2f &bl, cv::Mat img_ori);
cv::Mat preprocess_img(cv::Mat& img);
void doInference(IExecutionContext& context, float* input, float* output, int batchSize);
void process_cls_result(DetectionCropLP &res, float *output);
float sigmoid(float x);
cv::Mat segmentation(cv::Mat img);
cv::Mat Crop(cv::Mat mask, cv::Mat img_ori, std::string mode);
std::vector<cv::Mat> predict(cv::Mat inputImage, std::string mode);
};
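CropLP wraps a serialized TensorRT UNet engine (note the hard-coded .trt path above, which will need changing per deployment). The repository's _init() implementation is not shown in this view; the sketch below is the standard TensorRT deserialization sequence such an initializer would be expected to perform, and the function name is illustrative.

// Standard TensorRT engine deserialization sketch; not the repository's code.
#include <fstream>
#include <iterator>
#include <string>
#include <vector>
#include "NvInfer.h"

static nvinfer1::IExecutionContext *
load_engine (const std::string &path, nvinfer1::ILogger &logger,
             nvinfer1::IRuntime *&runtime, nvinfer1::ICudaEngine *&engine)
{
  std::ifstream file (path, std::ios::binary);
  if (!file.good ())
    return nullptr;

  // Read the whole serialized engine into memory.
  std::vector<char> blob ((std::istreambuf_iterator<char> (file)),
                          std::istreambuf_iterator<char> ());

  runtime = nvinfer1::createInferRuntime (logger);
  engine  = runtime ? runtime->deserializeCudaEngine (blob.data (), blob.size ())
                    : nullptr;
  return engine ? engine->createExecutionContext () : nullptr;
}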

148
include/CropLP_common.hpp Executable file

@@ -0,0 +1,148 @@
#ifndef UNET_COMMON_H_
#define UNET_COMMON_H_
#include <fstream>
#include <map>
#include <sstream>
#include <vector>
#include <opencv2/opencv.hpp>
#include <dirent.h>
#include "NvInfer.h"
#define CHECK(status) \
do\
{\
auto ret = (status);\
if (ret != 0)\
{\
std::cerr << "Cuda failure: " << ret << std::endl;\
abort();\
}\
} while (0)
using namespace nvinfer1;
// TensorRT weight files have a simple space delimited format:
// [type] [size] <data x size in hex>
// std::map<std::string, Weights> loadWeights(const std::string file) {
// std::cout << "Loading weights: " << file << std::endl;
// std::map<std::string, Weights> weightMap;
// // Open weights file
// std::ifstream input(file);
// assert(input.is_open() && "Unable to load weight file. please check if the .wts file path is right!!!!!!");
// // Read number of weight blobs
// int32_t count;
// input >> count;
// assert(count > 0 && "Invalid weight map file.");
// while (count--)
// {
// Weights wt{DataType::kFLOAT, nullptr, 0};
// uint32_t size;
// // Read name and type of blob
// std::string name;
// input >> name >> std::dec >> size;
// wt.type = DataType::kFLOAT;
// // Load blob
// uint32_t* val = reinterpret_cast<uint32_t*>(malloc(sizeof(val) * size));
// for (uint32_t x = 0, y = size; x < y; ++x)
// {
// input >> std::hex >> val[x];
// }
// wt.values = val;
// wt.count = size;
// weightMap[name] = wt;
// }
// return weightMap;
// }
// IScaleLayer* addBatchNorm2d(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, std::string lname, float eps) {
// float *gamma = (float*)weightMap[lname + ".weight"].values;
// float *beta = (float*)weightMap[lname + ".bias"].values;
// float *mean = (float*)weightMap[lname + ".running_mean"].values;
// float *var = (float*)weightMap[lname + ".running_var"].values;
// int len = weightMap[lname + ".running_var"].count;
// float *scval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
// for (int i = 0; i < len; i++) {
// scval[i] = gamma[i] / sqrt(var[i] + eps);
// }
// Weights scale{DataType::kFLOAT, scval, len};
// float *shval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
// for (int i = 0; i < len; i++) {
// shval[i] = beta[i] - mean[i] * gamma[i] / sqrt(var[i] + eps);
// }
// Weights shift{DataType::kFLOAT, shval, len};
// float *pval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
// for (int i = 0; i < len; i++) {
// pval[i] = 1.0;
// }
// Weights power{DataType::kFLOAT, pval, len};
// weightMap[lname + ".scale"] = scale;
// weightMap[lname + ".shift"] = shift;
// weightMap[lname + ".power"] = power;
// IScaleLayer* scale_1 = network->addScale(input, ScaleMode::kCHANNEL, shift, scale, power);
// assert(scale_1);
// return scale_1;
// }
// ILayer* convBlock(INetworkDefinition *network, std::map<std::string, Weights>& weightMap, ITensor& input, int outch, int ksize, int s, int g, std::string lname) {
// Weights emptywts{DataType::kFLOAT, nullptr, 0};
// int p = ksize / 2;
// IConvolutionLayer* conv1 = network->addConvolutionNd(input, outch, DimsHW{ksize, ksize}, weightMap[lname + ".conv.weight"], emptywts);
// assert(conv1);
// conv1->setStrideNd(DimsHW{s, s});
// conv1->setPaddingNd(DimsHW{p, p});
// conv1->setNbGroups(g);
// IScaleLayer* bn1 = addBatchNorm2d(network, weightMap, *conv1->getOutput(0), lname + ".bn", 1e-3);
// // hard_swish = x * hard_sigmoid
// auto hsig = network->addActivation(*bn1->getOutput(0), ActivationType::kHARD_SIGMOID);
// assert(hsig);
// hsig->setAlpha(1.0 / 6.0);
// hsig->setBeta(0.5);
// auto ew = network->addElementWise(*bn1->getOutput(0), *hsig->getOutput(0), ElementWiseOperation::kPROD);
// assert(ew);
// return ew;
// }
// int read_files_in_dir(const char *p_dir_name, std::vector<std::string> &file_names) {
// DIR *p_dir = opendir(p_dir_name);
// if (p_dir == nullptr) {
// return -1;
// }
// struct dirent* p_file = nullptr;
// while ((p_file = readdir(p_dir)) != nullptr) {
// if (strcmp(p_file->d_name, ".") != 0 &&
// strcmp(p_file->d_name, "..") != 0) {
// //std::string cur_file_name(p_dir_name);
// //cur_file_name += "/";
// //cur_file_name += p_file->d_name;
// std::string cur_file_name(p_file->d_name);
// file_names.push_back(cur_file_name);
// }
// }
// closedir(p_dir);
// return 0;
// }
#endif

503
include/CropLP_logging.h Executable file
View File

@ -0,0 +1,503 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TENSORRT_LOGGING_H
#define TENSORRT_LOGGING_H
#include "NvInferRuntimeCommon.h"
#include <cassert>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <sstream>
#include <string>
using Severity = nvinfer1::ILogger::Severity;
class LogStreamConsumerBuffer : public std::stringbuf
{
public:
LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
: mOutput(stream)
, mPrefix(prefix)
, mShouldLog(shouldLog)
{
}
LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
: mOutput(other.mOutput)
{
}
~LogStreamConsumerBuffer()
{
// std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
// std::streambuf::pptr() gives a pointer to the current position of the output sequence
// if the pointer to the beginning is not equal to the pointer to the current position,
// call putOutput() to log the output to the stream
if (pbase() != pptr())
{
putOutput();
}
}
// synchronizes the stream buffer and returns 0 on success
// synchronizing the stream buffer consists of inserting the buffer contents into the stream,
// resetting the buffer and flushing the stream
virtual int sync()
{
putOutput();
return 0;
}
void putOutput()
{
if (mShouldLog)
{
// prepend timestamp
std::time_t timestamp = std::time(nullptr);
tm* tm_local = std::localtime(&timestamp);
std::cout << "[";
std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
// std::stringbuf::str() gets the string contents of the buffer
// insert the buffer contents pre-appended by the appropriate prefix into the stream
mOutput << mPrefix << str();
// set the buffer to empty
str("");
// flush the stream
mOutput.flush();
}
}
void setShouldLog(bool shouldLog)
{
mShouldLog = shouldLog;
}
private:
std::ostream& mOutput;
std::string mPrefix;
bool mShouldLog;
};
//!
//! \class LogStreamConsumerBase
//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
//!
class LogStreamConsumerBase
{
public:
LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog)
: mBuffer(stream, prefix, shouldLog)
{
}
protected:
LogStreamConsumerBuffer mBuffer;
};
//!
//! \class LogStreamConsumer
//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages.
//! Order of base classes is LogStreamConsumerBase and then std::ostream.
//! This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field
//! in LogStreamConsumer and then the address of the buffer is passed to std::ostream.
//! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream.
//! Please do not change the order of the parent classes.
//!
class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream
{
public:
//! \brief Creates a LogStreamConsumer which logs messages with level severity.
//! Reportable severity determines if the messages are severe enough to be logged.
LogStreamConsumer(Severity reportableSeverity, Severity severity)
: LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity)
, std::ostream(&mBuffer) // links the stream buffer with the stream
, mShouldLog(severity <= reportableSeverity)
, mSeverity(severity)
{
}
LogStreamConsumer(LogStreamConsumer&& other)
: LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog)
, std::ostream(&mBuffer) // links the stream buffer with the stream
, mShouldLog(other.mShouldLog)
, mSeverity(other.mSeverity)
{
}
void setReportableSeverity(Severity reportableSeverity)
{
mShouldLog = mSeverity <= reportableSeverity;
mBuffer.setShouldLog(mShouldLog);
}
private:
static std::ostream& severityOstream(Severity severity)
{
return severity >= Severity::kINFO ? std::cout : std::cerr;
}
static std::string severityPrefix(Severity severity)
{
switch (severity)
{
case Severity::kINTERNAL_ERROR: return "[F] ";
case Severity::kERROR: return "[E] ";
case Severity::kWARNING: return "[W] ";
case Severity::kINFO: return "[I] ";
case Severity::kVERBOSE: return "[V] ";
default: assert(0); return "";
}
}
bool mShouldLog;
Severity mSeverity;
};
//! \class Logger
//!
//! \brief Class which manages logging of TensorRT tools and samples
//!
//! \details This class provides a common interface for TensorRT tools and samples to log information to the console,
//! and supports logging two types of messages:
//!
//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal)
//! - Test pass/fail messages
//!
//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is
//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location.
//!
//! In the future, this class could be extended to support dumping test results to a file in some standard format
//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run).
//!
//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger
//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT
//! library and messages coming from the sample.
//!
//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the
//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger
//! object.
class Logger : public nvinfer1::ILogger
{
public:
Logger(Severity severity = Severity::kWARNING)
: mReportableSeverity(severity)
{
}
//!
//! \enum TestResult
//! \brief Represents the state of a given test
//!
enum class TestResult
{
kRUNNING, //!< The test is running
kPASSED, //!< The test passed
kFAILED, //!< The test failed
kWAIVED //!< The test was waived
};
//!
//! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger
//! \return The nvinfer1::ILogger associated with this Logger
//!
//! TODO Once all samples are updated to use this method to register the logger with TensorRT,
//! we can eliminate the inheritance of Logger from ILogger
//!
nvinfer1::ILogger& getTRTLogger()
{
return *this;
}
//!
//! \brief Implementation of the nvinfer1::ILogger::log() virtual method
//!
//! Note samples should not be calling this function directly; it will eventually go away once we eliminate the
//! inheritance from nvinfer1::ILogger
//!
void log(Severity severity, const char* msg) noexcept override
{
LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl;
}
//!
//! \brief Method for controlling the verbosity of logging output
//!
//! \param severity The logger will only emit messages that have severity of this level or higher.
//!
void setReportableSeverity(Severity severity)
{
mReportableSeverity = severity;
}
//!
//! \brief Opaque handle that holds logging information for a particular test
//!
//! This object is an opaque handle to information used by the Logger to print test results.
//! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used
//! with Logger::reportTest{Start,End}().
//!
class TestAtom
{
public:
TestAtom(TestAtom&&) = default;
private:
friend class Logger;
TestAtom(bool started, const std::string& name, const std::string& cmdline)
: mStarted(started)
, mName(name)
, mCmdline(cmdline)
{
}
bool mStarted;
std::string mName;
std::string mCmdline;
};
//!
//! \brief Define a test for logging
//!
//! \param[in] name The name of the test. This should be a string starting with
//! "TensorRT" and containing dot-separated strings containing
//! the characters [A-Za-z0-9_].
//! For example, "TensorRT.sample_googlenet"
//! \param[in] cmdline The command line used to reproduce the test
//
//! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
//!
static TestAtom defineTest(const std::string& name, const std::string& cmdline)
{
return TestAtom(false, name, cmdline);
}
//!
//! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments
//! as input
//!
//! \param[in] name The name of the test
//! \param[in] argc The number of command-line arguments
//! \param[in] argv The array of command-line arguments (given as C strings)
//!
//! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
static TestAtom defineTest(const std::string& name, int argc, char const* const* argv)
{
auto cmdline = genCmdlineString(argc, argv);
return defineTest(name, cmdline);
}
//!
//! \brief Report that a test has started.
//!
//! \pre reportTestStart() has not been called yet for the given testAtom
//!
//! \param[in] testAtom The handle to the test that has started
//!
static void reportTestStart(TestAtom& testAtom)
{
reportTestResult(testAtom, TestResult::kRUNNING);
assert(!testAtom.mStarted);
testAtom.mStarted = true;
}
//!
//! \brief Report that a test has ended.
//!
//! \pre reportTestStart() has been called for the given testAtom
//!
//! \param[in] testAtom The handle to the test that has ended
//! \param[in] result The result of the test. Should be one of TestResult::kPASSED,
//! TestResult::kFAILED, TestResult::kWAIVED
//!
static void reportTestEnd(const TestAtom& testAtom, TestResult result)
{
assert(result != TestResult::kRUNNING);
assert(testAtom.mStarted);
reportTestResult(testAtom, result);
}
static int reportPass(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kPASSED);
return EXIT_SUCCESS;
}
static int reportFail(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kFAILED);
return EXIT_FAILURE;
}
static int reportWaive(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kWAIVED);
return EXIT_SUCCESS;
}
static int reportTest(const TestAtom& testAtom, bool pass)
{
return pass ? reportPass(testAtom) : reportFail(testAtom);
}
Severity getReportableSeverity() const
{
return mReportableSeverity;
}
private:
//!
//! \brief returns an appropriate string for prefixing a log message with the given severity
//!
static const char* severityPrefix(Severity severity)
{
switch (severity)
{
case Severity::kINTERNAL_ERROR: return "[F] ";
case Severity::kERROR: return "[E] ";
case Severity::kWARNING: return "[W] ";
case Severity::kINFO: return "[I] ";
case Severity::kVERBOSE: return "[V] ";
default: assert(0); return "";
}
}
//!
//! \brief returns an appropriate string for prefixing a test result message with the given result
//!
static const char* testResultString(TestResult result)
{
switch (result)
{
case TestResult::kRUNNING: return "RUNNING";
case TestResult::kPASSED: return "PASSED";
case TestResult::kFAILED: return "FAILED";
case TestResult::kWAIVED: return "WAIVED";
default: assert(0); return "";
}
}
//!
//! \brief returns an appropriate output stream (cout or cerr) to use with the given severity
//!
static std::ostream& severityOstream(Severity severity)
{
return severity >= Severity::kINFO ? std::cout : std::cerr;
}
//!
//! \brief method that implements logging test results
//!
static void reportTestResult(const TestAtom& testAtom, TestResult result)
{
severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # "
<< testAtom.mCmdline << std::endl;
}
//!
//! \brief generate a command line string from the given (argc, argv) values
//!
static std::string genCmdlineString(int argc, char const* const* argv)
{
std::stringstream ss;
for (int i = 0; i < argc; i++)
{
if (i > 0)
ss << " ";
ss << argv[i];
}
return ss.str();
}
Severity mReportableSeverity;
};
namespace
{
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE
//!
//! Example usage:
//!
//! LOG_VERBOSE(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO
//!
//! Example usage:
//!
//! LOG_INFO(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_INFO(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING
//!
//! Example usage:
//!
//! LOG_WARN(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_WARN(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR
//!
//! Example usage:
//!
//! LOG_ERROR(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_ERROR(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR
// ("fatal" severity)
//!
//! Example usage:
//!
//! LOG_FATAL(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_FATAL(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR);
}
} // anonymous namespace
#endif // TENSORRT_LOGGING_H
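The class comments above describe the intended flow: obtain the ILogger via getTRTLogger() and register it with TensorRT. A minimal sketch under that assumption (builder lifecycle details vary across TensorRT versions):

#include "CropLP_logging.h"
#include "NvInfer.h"

static Logger demoLogger(Severity::kINFO);

void buildDemo() {
    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(demoLogger.getTRTLogger());
    LOG_INFO(demoLogger) << "TensorRT builder created" << std::endl;
    // ... create a network / parse weights / build or deserialize an engine here ...
    builder->destroy();   // deprecated-but-available on TensorRT 7/8; newer code uses `delete builder`
}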

35
include/CropLicensePlate.h Normal file
View File

@ -0,0 +1,35 @@
#pragma once
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "net.h"
#include <vector>
#include <stdio.h>
#include "config.h"
using namespace cv;
using namespace std;
class CropPlate {
public:
CropPlate();
vector<cv::Mat> cropFormFrame(cv::Mat frame, string mode);
private:
ncnn::Net net;
int size_input;
float detect_threshold;
float img_size = 272.0;
float wh_ratio = 2.0;
private:
void sort(std::vector<DLable> &dLables);
cv::Mat cropImage4Point(cv::Point2f rect_points[], cv::Mat img, cv::Size size);
void memcpy_array(float dir[][4], float src[][4]);
float IOU(DLable label1, DLable label2);
std::vector<DLable> nms(std::vector<DLable> dLables, float iou_threshold=.5, float prob_threshold=.5);
float length(cv::Point2f p1, cv::Point2f p2);
vector<cv::Mat> reconstruct(cv::Mat img, ncnn::Mat out_probs, ncnn::Mat out_box, float threshold = 0.7, string mode = "");
};

10
include/GetPlateColor.h Normal file
View File

@ -0,0 +1,10 @@
#pragma once
#include <iostream>
#include "config.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
std::string checkColorPlate(cv::Mat img_bgr, std::string mode);

20
include/LPRprocessor.h Normal file
View File

@ -0,0 +1,20 @@
#pragma once
#include "iostream"
#include "CppThread.h"
#include "structs.h"
#include "LicensePlateRecognition.h"
#include "queue"
#include "mutex"
class LPRprocessor : public CppThread
{
public:
LPRprocessor(std::queue<LicensePlateItem> &qLicensePlate_, std::mutex &mutexThread_);
void run();
private:
LicensePlateRecognizer licensePlateRecognizer;
std::queue<LicensePlateItem> &qLicensePlate;
std::mutex &mutexThread;
std::string dstSaveImg_0 = "/home/thai/Desktop/test-OCR/source_0/";
std::string dstSaveImg_1 = "/home/thai/Desktop/test-OCR/source_1/";
};
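A hedged sketch of the producer/consumer wiring this header implies: the caller shares a queue and mutex with the worker, whose run() drains the plates. start() is assumed to come from CppThread; field names are taken from LicensePlateItem in structs.h:

#include "LPRprocessor.h"

std::queue<LicensePlateItem> qPlates;
std::mutex qMutex;
LPRprocessor lprWorker(qPlates, qMutex);

// lprWorker.start();                     // assumed CppThread API: launches run() on a worker thread
// {
//     std::lock_guard<std::mutex> lock(qMutex);
//     LicensePlateItem item;
//     item.sourceID = 0;
//     item.trackID = 42;
//     item.mode = "long";                // "long"/"square", per structs.h
//     item.imgLicensePlate = plateMat;   // plateMat: a cv::Mat produced upstream (hypothetical)
//     qPlates.push(item);
// }
// lprWorker.stop();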

26
include/LicensePlateRecognition.h Normal file
View File

@ -0,0 +1,26 @@
#pragma once
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include "GetPlateColor.h"
// #include "CropLicensePlate.h"
#include "CharacterDetection.h"
#include "CropLP.h"
class LicensePlateRecognizer {
private:
// CropPlate cropPlate;
CropLP cropLP;
CharacterDetector characterDetector;
public:
LicensePlateRecognizer();
static LicensePlateRecognizer &getInst()
{
static LicensePlateRecognizer instance;
return instance;
};
PlateDetail recognition(cv::Mat frame, std::string mode);
};
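The top-level entry point is recognition(); a short hedged sketch of calling it (the mode string is assumed from the constants in structs.h):

#include "LicensePlateRecognition.h"

PlateDetail readPlate(const cv::Mat& plateImg) {
    PlateDetail result = LicensePlateRecognizer::getInst().recognition(plateImg, SQUARE);
    std::cout << "OCR: " << result.ocr << "  color: " << result.color << std::endl;
    return result;
}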

68
include/YOLOv5/DetectorCOCO.h Executable file
View File

@ -0,0 +1,68 @@
#include <iostream>
#include <chrono>
#include <cmath>
#include "cuda_utils.h"
#include "logging.h"
#include "utils.h"
#include "preprocess.h"
#include "structs.h"
#include <fstream>
#include <map>
#include <sstream>
#include <vector>
#include <opencv2/opencv.hpp>
#include "NvInfer.h"
#include "yololayer.h"
using namespace nvinfer1;
#define USE_FP32 // set USE_INT8 or USE_FP16 or USE_FP32
#define DEVICE 0 // GPU id
#define NMS_THRESH_YOLO 0.4
#define CONF_THRESH_YOLO_HEAD 0.75
#define CONF_THRESH_YOLO_CHAIR 0.8
#define BATCH_SIZE_YOLO 1
#define MAX_IMAGE_INPUT_SIZE_THRESH (3000 * 3000) // make sure this exceeds the maximum resolution of the input images!
class DetectorCOCO{
public:
static DetectorCOCO &getInst()
{
static DetectorCOCO instance;
return instance;
};
void _init();
DetectorCOCO();
cv::Rect get_rect_yolov5(cv::Mat& img, float bbox[4]);
float iou_yolov5(float lbox[4], float rbox[4]);
static bool cmp_yolov5(const Yolo::Detection& a, const Yolo::Detection& b);
void nms_yolov5(std::vector<Yolo::Detection>& res, float *output, float conf_thresh, float nms_thresh);
void doInference(IExecutionContext& context, cudaStream_t& stream, void **buffers, float* output, int batchSize);
std::vector<ObjectDetect> predict(cv::Mat inputImage, std::string type);
private:
const int INPUT_H = Yolo::INPUT_H;
const int INPUT_W = Yolo::INPUT_W;
const int CLASS_NUM = Yolo::CLASS_NUM;
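// +1 below: in the upstream tensorrtx yolov5 sample this header follows, the first float of the
// output buffer holds the detection count, followed by MAX_OUTPUT_BBOX_COUNT packed Detection structs.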
const int OUTPUT_SIZE = Yolo::MAX_OUTPUT_BBOX_COUNT * sizeof(Yolo::Detection) / sizeof(float) + 1;
const char* INPUT_BLOB_NAME = "data";
const char* OUTPUT_BLOB_NAME = "prob";
Logger gLogger;
std::string engine_name_head = "/opt/nvidia/deepstream/deepstream-6.0/sources/apps/sample_apps/deepstream-app/Model_Classify/verify/crowdhuman_0-body_1-head__yolov5m.engine";
std::string engine_name_chair = "/media/thai/A0B6A6B3B6A688FC2/code/PROJECT_BI/CLONE/tensorrtx/yolov5/build/20220615_weight_80class.engine";
IRuntime* runtime_head;
IRuntime* runtime_chair;
ICudaEngine* engine_head;
ICudaEngine* engine_chair;
IExecutionContext* context_head;
IExecutionContext* context_chair;
};
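A hedged usage sketch of the detector singleton above. The header name and the accepted `type` values are assumptions (the two engine paths suggest a person/head model and an 80-class COCO model); DetectorCOCO.cpp is not shown:

#include "DetectorCOCO.h"      // assumed header name

void drawHeads(cv::Mat& frame) {
    std::vector<ObjectDetect> dets = DetectorCOCO::getInst().predict(frame, "head");   // "head" is an assumed type value
    for (const ObjectDetect& d : dets)
        cv::rectangle(frame, cv::Rect(d.box), cv::Scalar(0, 255, 0), 2);
}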

18
include/YOLOv5/cuda_utils.h Executable file
View File

@ -0,0 +1,18 @@
#ifndef TRTX_CUDA_UTILS_H_
#define TRTX_CUDA_UTILS_H_
#include <cuda_runtime_api.h>
#ifndef CUDA_CHECK
#define CUDA_CHECK(callstr)\
{\
cudaError_t error_code = callstr;\
if (error_code != cudaSuccess) {\
std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\
assert(0);\
}\
}
#endif // CUDA_CHECK
#endif // TRTX_CUDA_UTILS_H_

504
include/YOLOv5/logging.h Executable file
View File

@ -0,0 +1,504 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TENSORRT_LOGGING_H
#define TENSORRT_LOGGING_H
#include "NvInferRuntimeCommon.h"
#include <cassert>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <sstream>
#include <string>
#include "macros.h"
using Severity = nvinfer1::ILogger::Severity;
class LogStreamConsumerBuffer : public std::stringbuf
{
public:
LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
: mOutput(stream)
, mPrefix(prefix)
, mShouldLog(shouldLog)
{
}
LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
: mOutput(other.mOutput)
{
}
~LogStreamConsumerBuffer()
{
// std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
// std::streambuf::pptr() gives a pointer to the current position of the output sequence
// if the pointer to the beginning is not equal to the pointer to the current position,
// call putOutput() to log the output to the stream
if (pbase() != pptr())
{
putOutput();
}
}
// synchronizes the stream buffer and returns 0 on success
// synchronizing the stream buffer consists of inserting the buffer contents into the stream,
// resetting the buffer and flushing the stream
virtual int sync()
{
putOutput();
return 0;
}
void putOutput()
{
if (mShouldLog)
{
// prepend timestamp
std::time_t timestamp = std::time(nullptr);
tm* tm_local = std::localtime(&timestamp);
std::cout << "[";
std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
// std::stringbuf::str() gets the string contents of the buffer
// insert the buffer contents pre-appended by the appropriate prefix into the stream
mOutput << mPrefix << str();
// set the buffer to empty
str("");
// flush the stream
mOutput.flush();
}
}
void setShouldLog(bool shouldLog)
{
mShouldLog = shouldLog;
}
private:
std::ostream& mOutput;
std::string mPrefix;
bool mShouldLog;
};
//!
//! \class LogStreamConsumerBase
//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
//!
class LogStreamConsumerBase
{
public:
LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog)
: mBuffer(stream, prefix, shouldLog)
{
}
protected:
LogStreamConsumerBuffer mBuffer;
};
//!
//! \class LogStreamConsumer
//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages.
//! Order of base classes is LogStreamConsumerBase and then std::ostream.
//! This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field
//! in LogStreamConsumer and then the address of the buffer is passed to std::ostream.
//! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream.
//! Please do not change the order of the parent classes.
//!
class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream
{
public:
//! \brief Creates a LogStreamConsumer which logs messages with level severity.
//! Reportable severity determines if the messages are severe enough to be logged.
LogStreamConsumer(Severity reportableSeverity, Severity severity)
: LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity)
, std::ostream(&mBuffer) // links the stream buffer with the stream
, mShouldLog(severity <= reportableSeverity)
, mSeverity(severity)
{
}
LogStreamConsumer(LogStreamConsumer&& other)
: LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog)
, std::ostream(&mBuffer) // links the stream buffer with the stream
, mShouldLog(other.mShouldLog)
, mSeverity(other.mSeverity)
{
}
void setReportableSeverity(Severity reportableSeverity)
{
mShouldLog = mSeverity <= reportableSeverity;
mBuffer.setShouldLog(mShouldLog);
}
private:
static std::ostream& severityOstream(Severity severity)
{
return severity >= Severity::kINFO ? std::cout : std::cerr;
}
static std::string severityPrefix(Severity severity)
{
switch (severity)
{
case Severity::kINTERNAL_ERROR: return "[F] ";
case Severity::kERROR: return "[E] ";
case Severity::kWARNING: return "[W] ";
case Severity::kINFO: return "[I] ";
case Severity::kVERBOSE: return "[V] ";
default: assert(0); return "";
}
}
bool mShouldLog;
Severity mSeverity;
};
//! \class Logger
//!
//! \brief Class which manages logging of TensorRT tools and samples
//!
//! \details This class provides a common interface for TensorRT tools and samples to log information to the console,
//! and supports logging two types of messages:
//!
//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal)
//! - Test pass/fail messages
//!
//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is
//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location.
//!
//! In the future, this class could be extended to support dumping test results to a file in some standard format
//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run).
//!
//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger
//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT
//! library and messages coming from the sample.
//!
//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the
//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger
//! object.
class Logger : public nvinfer1::ILogger
{
public:
Logger(Severity severity = Severity::kWARNING)
: mReportableSeverity(severity)
{
}
//!
//! \enum TestResult
//! \brief Represents the state of a given test
//!
enum class TestResult
{
kRUNNING, //!< The test is running
kPASSED, //!< The test passed
kFAILED, //!< The test failed
kWAIVED //!< The test was waived
};
//!
//! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger
//! \return The nvinfer1::ILogger associated with this Logger
//!
//! TODO Once all samples are updated to use this method to register the logger with TensorRT,
//! we can eliminate the inheritance of Logger from ILogger
//!
nvinfer1::ILogger& getTRTLogger()
{
return *this;
}
//!
//! \brief Implementation of the nvinfer1::ILogger::log() virtual method
//!
//! Note samples should not be calling this function directly; it will eventually go away once we eliminate the
//! inheritance from nvinfer1::ILogger
//!
void log(Severity severity, const char* msg) TRT_NOEXCEPT override
{
LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl;
}
//!
//! \brief Method for controlling the verbosity of logging output
//!
//! \param severity The logger will only emit messages that have severity of this level or higher.
//!
void setReportableSeverity(Severity severity)
{
mReportableSeverity = severity;
}
//!
//! \brief Opaque handle that holds logging information for a particular test
//!
//! This object is an opaque handle to information used by the Logger to print test results.
//! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used
//! with Logger::reportTest{Start,End}().
//!
class TestAtom
{
public:
TestAtom(TestAtom&&) = default;
private:
friend class Logger;
TestAtom(bool started, const std::string& name, const std::string& cmdline)
: mStarted(started)
, mName(name)
, mCmdline(cmdline)
{
}
bool mStarted;
std::string mName;
std::string mCmdline;
};
//!
//! \brief Define a test for logging
//!
//! \param[in] name The name of the test. This should be a string starting with
//! "TensorRT" and containing dot-separated strings containing
//! the characters [A-Za-z0-9_].
//! For example, "TensorRT.sample_googlenet"
//! \param[in] cmdline The command line used to reproduce the test
//
//! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
//!
static TestAtom defineTest(const std::string& name, const std::string& cmdline)
{
return TestAtom(false, name, cmdline);
}
//!
//! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments
//! as input
//!
//! \param[in] name The name of the test
//! \param[in] argc The number of command-line arguments
//! \param[in] argv The array of command-line arguments (given as C strings)
//!
//! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
static TestAtom defineTest(const std::string& name, int argc, char const* const* argv)
{
auto cmdline = genCmdlineString(argc, argv);
return defineTest(name, cmdline);
}
//!
//! \brief Report that a test has started.
//!
//! \pre reportTestStart() has not been called yet for the given testAtom
//!
//! \param[in] testAtom The handle to the test that has started
//!
static void reportTestStart(TestAtom& testAtom)
{
reportTestResult(testAtom, TestResult::kRUNNING);
assert(!testAtom.mStarted);
testAtom.mStarted = true;
}
//!
//! \brief Report that a test has ended.
//!
//! \pre reportTestStart() has been called for the given testAtom
//!
//! \param[in] testAtom The handle to the test that has ended
//! \param[in] result The result of the test. Should be one of TestResult::kPASSED,
//! TestResult::kFAILED, TestResult::kWAIVED
//!
static void reportTestEnd(const TestAtom& testAtom, TestResult result)
{
assert(result != TestResult::kRUNNING);
assert(testAtom.mStarted);
reportTestResult(testAtom, result);
}
static int reportPass(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kPASSED);
return EXIT_SUCCESS;
}
static int reportFail(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kFAILED);
return EXIT_FAILURE;
}
static int reportWaive(const TestAtom& testAtom)
{
reportTestEnd(testAtom, TestResult::kWAIVED);
return EXIT_SUCCESS;
}
static int reportTest(const TestAtom& testAtom, bool pass)
{
return pass ? reportPass(testAtom) : reportFail(testAtom);
}
Severity getReportableSeverity() const
{
return mReportableSeverity;
}
private:
//!
//! \brief returns an appropriate string for prefixing a log message with the given severity
//!
static const char* severityPrefix(Severity severity)
{
switch (severity)
{
case Severity::kINTERNAL_ERROR: return "[F] ";
case Severity::kERROR: return "[E] ";
case Severity::kWARNING: return "[W] ";
case Severity::kINFO: return "[I] ";
case Severity::kVERBOSE: return "[V] ";
default: assert(0); return "";
}
}
//!
//! \brief returns an appropriate string for prefixing a test result message with the given result
//!
static const char* testResultString(TestResult result)
{
switch (result)
{
case TestResult::kRUNNING: return "RUNNING";
case TestResult::kPASSED: return "PASSED";
case TestResult::kFAILED: return "FAILED";
case TestResult::kWAIVED: return "WAIVED";
default: assert(0); return "";
}
}
//!
//! \brief returns an appropriate output stream (cout or cerr) to use with the given severity
//!
static std::ostream& severityOstream(Severity severity)
{
return severity >= Severity::kINFO ? std::cout : std::cerr;
}
//!
//! \brief method that implements logging test results
//!
static void reportTestResult(const TestAtom& testAtom, TestResult result)
{
severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # "
<< testAtom.mCmdline << std::endl;
}
//!
//! \brief generate a command line string from the given (argc, argv) values
//!
static std::string genCmdlineString(int argc, char const* const* argv)
{
std::stringstream ss;
for (int i = 0; i < argc; i++)
{
if (i > 0)
ss << " ";
ss << argv[i];
}
return ss.str();
}
Severity mReportableSeverity;
};
namespace
{
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE
//!
//! Example usage:
//!
//! LOG_VERBOSE(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO
//!
//! Example usage:
//!
//! LOG_INFO(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_INFO(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING
//!
//! Example usage:
//!
//! LOG_WARN(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_WARN(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR
//!
//! Example usage:
//!
//! LOG_ERROR(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_ERROR(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR);
}
//!
//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR
// ("fatal" severity)
//!
//! Example usage:
//!
//! LOG_FATAL(logger) << "hello world" << std::endl;
//!
inline LogStreamConsumer LOG_FATAL(const Logger& logger)
{
return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR);
}
} // anonymous namespace
#endif // TENSORRT_LOGGING_H

27
include/YOLOv5/macros.h Executable file
View File

@ -0,0 +1,27 @@
#ifndef __MACROS_H
#define __MACROS_H
#ifdef API_EXPORTS
#if defined(_MSC_VER)
#define API __declspec(dllexport)
#else
#define API __attribute__((visibility("default")))
#endif
#else
#if defined(_MSC_VER)
#define API __declspec(dllimport)
#else
#define API
#endif
#endif // API_EXPORTS
#if NV_TENSORRT_MAJOR >= 8
#define TRT_NOEXCEPT noexcept
#define TRT_CONST_ENQUEUE const
#else
#define TRT_NOEXCEPT
#define TRT_CONST_ENQUEUE
#endif
#endif // __MACROS_H

16
include/YOLOv5/preprocess.h Executable file
View File

@ -0,0 +1,16 @@
#ifndef __PREPROCESS_H
#define __PREPROCESS_H
#include <cuda_runtime.h>
#include <cstdint>
struct AffineMatrix{
float value[6];
};
void preprocess_kernel_img(uint8_t* src, int src_width, int src_height,
float* dst, int dst_width, int dst_height,
cudaStream_t stream);
#endif // __PREPROCESS_H

52
include/YOLOv5/utils.h Executable file
View File

@ -0,0 +1,52 @@
#ifndef TRTX_YOLOV5_UTILS_H_
#define TRTX_YOLOV5_UTILS_H_
#include <dirent.h>
#include <opencv2/opencv.hpp>
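// preprocess_img: letterbox resize -- scale the image to fit (input_w, input_h) while keeping
// its aspect ratio, center it, and pad the remaining border with gray (128, 128, 128).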
static inline cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) {
int w, h, x, y;
float r_w = input_w / (img.cols*1.0);
float r_h = input_h / (img.rows*1.0);
if (r_h > r_w) {
w = input_w;
h = r_w * img.rows;
x = 0;
y = (input_h - h) / 2;
} else {
w = r_h * img.cols;
h = input_h;
x = (input_w - w) / 2;
y = 0;
}
cv::Mat re(h, w, CV_8UC3);
cv::resize(img, re, re.size(), 0, 0, cv::INTER_LINEAR);
cv::Mat out(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128));
re.copyTo(out(cv::Rect(x, y, re.cols, re.rows)));
return out;
}
static inline int read_files_in_dir(const char *p_dir_name, std::vector<std::string> &file_names) {
DIR *p_dir = opendir(p_dir_name);
if (p_dir == nullptr) {
return -1;
}
struct dirent* p_file = nullptr;
while ((p_file = readdir(p_dir)) != nullptr) {
if (strcmp(p_file->d_name, ".") != 0 &&
strcmp(p_file->d_name, "..") != 0) {
//std::string cur_file_name(p_dir_name);
//cur_file_name += "/";
//cur_file_name += p_file->d_name;
std::string cur_file_name(p_file->d_name);
file_names.push_back(cur_file_name);
}
}
closedir(p_dir);
return 0;
}
#endif // TRTX_YOLOV5_UTILS_H_

138
include/YOLOv5/yololayer.h Executable file
View File

@ -0,0 +1,138 @@
#ifndef _YOLO_LAYER_H
#define _YOLO_LAYER_H
#include <vector>
#include <string>
#include <NvInfer.h>
#include "macros.h"
namespace Yolo
{
static constexpr int CHECK_COUNT = 3;
static constexpr float IGNORE_THRESH = 0.1f;
struct YoloKernel
{
int width;
int height;
float anchors[CHECK_COUNT * 2];
};
static constexpr int MAX_OUTPUT_BBOX_COUNT = 1000;
static constexpr int CLASS_NUM = 80;
static constexpr int INPUT_H = 640; // yolov5's input height and width must be divisible by 32.
static constexpr int INPUT_W = 640;
static constexpr int LOCATIONS = 4;
struct alignas(float) Detection {
//center_x center_y w h
float bbox[LOCATIONS];
float conf; // bbox_conf * cls_conf
float class_id;
};
}
namespace nvinfer1
{
class API YoloLayerPlugin : public IPluginV2IOExt
{
public:
YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel);
YoloLayerPlugin(const void* data, size_t length);
~YoloLayerPlugin();
int getNbOutputs() const TRT_NOEXCEPT override
{
return 1;
}
Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT override;
int initialize() TRT_NOEXCEPT override;
virtual void terminate() TRT_NOEXCEPT override {};
virtual size_t getWorkspaceSize(int maxBatchSize) const TRT_NOEXCEPT override { return 0; }
virtual int enqueue(int batchSize, const void* const* inputs, void*TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT override;
virtual size_t getSerializationSize() const TRT_NOEXCEPT override;
virtual void serialize(void* buffer) const TRT_NOEXCEPT override;
bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const TRT_NOEXCEPT override {
return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT;
}
const char* getPluginType() const TRT_NOEXCEPT override;
const char* getPluginVersion() const TRT_NOEXCEPT override;
void destroy() TRT_NOEXCEPT override;
IPluginV2IOExt* clone() const TRT_NOEXCEPT override;
void setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT override;
const char* getPluginNamespace() const TRT_NOEXCEPT override;
DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT override;
bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT override;
bool canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT override;
void attachToContext(
cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT override;
void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT override;
void detachFromContext() TRT_NOEXCEPT override;
private:
void forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize = 1);
int mThreadCount = 256;
const char* mPluginNamespace;
int mKernelCount;
int mClassCount;
int mYoloV5NetWidth;
int mYoloV5NetHeight;
int mMaxOutObject;
std::vector<Yolo::YoloKernel> mYoloKernel;
void** mAnchor;
};
class API YoloPluginCreator : public IPluginCreator
{
public:
YoloPluginCreator();
~YoloPluginCreator() override = default;
const char* getPluginName() const TRT_NOEXCEPT override;
const char* getPluginVersion() const TRT_NOEXCEPT override;
const PluginFieldCollection* getFieldNames() TRT_NOEXCEPT override;
IPluginV2IOExt* createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT override;
IPluginV2IOExt* deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT override;
void setPluginNamespace(const char* libNamespace) TRT_NOEXCEPT override
{
mNamespace = libNamespace;
}
const char* getPluginNamespace() const TRT_NOEXCEPT override
{
return mNamespace.c_str();
}
private:
std::string mNamespace;
static PluginFieldCollection mFC;
static std::vector<PluginField> mPluginAttributes;
};
REGISTER_TENSORRT_PLUGIN(YoloPluginCreator);
};
#endif // _YOLO_LAYER_H

23
include/config.h Normal file
View File

@ -0,0 +1,23 @@
#pragma once
#include "json.hpp"
#include <iostream>
#include <fstream>
#include "structs.h"
class CF
{
public:
static CF& inst() {
static CF instance;
return instance;
}
CF(CF const&) = delete;
void operator=(CF const&) = delete;
void init();
nlohmann::json data;
private:
CF();
std::string linkFileConfig = "/opt/nvidia/deepstream/deepstream-6.0/sources/gst-plugins/gst-dsexample-OCR/data/config.json";
};
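A hedged sketch of reading a value through the CF singleton above. The key name is hypothetical; the real keys live in the config.json whose path is hard-coded in the class, and whether init() is called by the constructor or must be called explicitly depends on config.cpp (not shown):

#include "config.h"

float loadDetectThreshold() {
    CF::inst().init();                                        // may be redundant if the constructor already loads the file
    return CF::inst().data.value("detect_threshold", 0.5f);   // "detect_threshold" is a hypothetical key
}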

24659
include/json.hpp Executable file

File diff suppressed because it is too large Load Diff

72
include/structs.h Normal file
View File

@ -0,0 +1,72 @@
#pragma once
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace std;
const std::string SQUARE = "square";
const std::string LONG = "long";
const std::string NONE = "";
const std::string RED = "red";
const std::string BLUE = "blue";
const std::string WHITE = "white";
const std::string YELLOW = "yellow";
struct LicensePlateItem {
int sourceID;
int trackID;
int counterTrack = 0;
std::string mode = "";
cv::Mat imgLicensePlate;
};
struct DLable {
float pts[2][4] = {};
cv::Point2f tf;
cv::Point2f br;
int cl;
float prob;
};
struct Character {
cv::Rect box;
float centerX;
float centerY;
float prob;
std::string label;
};
struct PlateDetail
{
std::string ocr;
cv::Mat plate;
string mode;
string color;
bool isCheck = false;
};
struct ObjectDetect
{
cv::Rect_<float> box;
int label;
float conf;
std::string class_name;
};
struct Object
{
cv::Rect_<float> rect;
int label;
float prob;
std::string name;
};
struct GridAndStride
{
int grid0;
int grid1;
int stride;
};

BIN
lib_ncnn/libncnn.a Executable file

Binary file not shown.

440
lib_ncnn/ncnn/allocator.h Executable file
View File

@ -0,0 +1,440 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_ALLOCATOR_H
#define NCNN_ALLOCATOR_H
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
#include "platform.h"
#include <stdlib.h>
#if NCNN_VULKAN
#include <vulkan/vulkan.h>
#endif // NCNN_VULKAN
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 26
#include <android/hardware_buffer.h>
#endif // __ANDROID_API__ >= 26
#endif // NCNN_PLATFORM_API
namespace ncnn {
// the alignment of all the allocated buffers
#if NCNN_AVX512
#define NCNN_MALLOC_ALIGN 64
#elif NCNN_AVX
#define NCNN_MALLOC_ALIGN 32
#else
#define NCNN_MALLOC_ALIGN 16
#endif
// we have some optimized kernels that may overread buffer a bit in loop
// it is common to interleave next-loop data load with arithmetic instructions
// allocating more bytes keeps us safe from SEGV_ACCERR failure
#define NCNN_MALLOC_OVERREAD 64
// Aligns a pointer to the specified number of bytes
// ptr Aligned pointer
// n Alignment size that must be a power of two
template<typename _Tp>
static NCNN_FORCEINLINE _Tp* alignPtr(_Tp* ptr, int n = (int)sizeof(_Tp))
{
return (_Tp*)(((size_t)ptr + n - 1) & -n);
}
// Aligns a buffer size to the specified number of bytes
// The function returns the minimum number that is greater or equal to sz and is divisible by n
// sz Buffer size to align
// n Alignment size that must be a power of two
static NCNN_FORCEINLINE size_t alignSize(size_t sz, int n)
{
return (sz + n - 1) & -n;
}
static NCNN_FORCEINLINE void* fastMalloc(size_t size)
{
#if _MSC_VER
return _aligned_malloc(size, NCNN_MALLOC_ALIGN);
#elif (defined(__unix__) || defined(__APPLE__)) && _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17)
void* ptr = 0;
if (posix_memalign(&ptr, NCNN_MALLOC_ALIGN, size + NCNN_MALLOC_OVERREAD))
ptr = 0;
return ptr;
#elif __ANDROID__ && __ANDROID_API__ < 17
return memalign(NCNN_MALLOC_ALIGN, size + NCNN_MALLOC_OVERREAD);
#else
unsigned char* udata = (unsigned char*)malloc(size + sizeof(void*) + NCNN_MALLOC_ALIGN + NCNN_MALLOC_OVERREAD);
if (!udata)
return 0;
unsigned char** adata = alignPtr((unsigned char**)udata + 1, NCNN_MALLOC_ALIGN);
adata[-1] = udata;
return adata;
#endif
}
static NCNN_FORCEINLINE void fastFree(void* ptr)
{
if (ptr)
{
#if _MSC_VER
_aligned_free(ptr);
#elif (defined(__unix__) || defined(__APPLE__)) && _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17)
free(ptr);
#elif __ANDROID__ && __ANDROID_API__ < 17
free(ptr);
#else
unsigned char* udata = ((unsigned char**)ptr)[-1];
free(udata);
#endif
}
}
#if NCNN_THREADS
// exchange-add operation for atomic operations on reference counters
#if defined __riscv && !defined __riscv_atomic
// riscv target without A extension
static NCNN_FORCEINLINE int NCNN_XADD(int* addr, int delta)
{
int tmp = *addr;
*addr += delta;
return tmp;
}
#elif defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32)
// atomic increment on the linux version of the Intel(tm) compiler
#define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(addr)), delta)
#elif defined __GNUC__
#if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__)
#ifdef __ATOMIC_ACQ_REL
#define NCNN_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), delta, __ATOMIC_ACQ_REL)
#else
#define NCNN_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), delta, 4)
#endif
#else
#if defined __ATOMIC_ACQ_REL && !defined __clang__
// version for gcc >= 4.7
#define NCNN_XADD(addr, delta) (int)__atomic_fetch_add((unsigned*)(addr), (unsigned)(delta), __ATOMIC_ACQ_REL)
#else
#define NCNN_XADD(addr, delta) (int)__sync_fetch_and_add((unsigned*)(addr), (unsigned)(delta))
#endif
#endif
#elif defined _MSC_VER && !defined RC_INVOKED
#define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd((long volatile*)addr, delta)
#else
// thread-unsafe branch
static NCNN_FORCEINLINE int NCNN_XADD(int* addr, int delta)
{
int tmp = *addr;
*addr += delta;
return tmp;
}
#endif
#else // NCNN_THREADS
static NCNN_FORCEINLINE int NCNN_XADD(int* addr, int delta)
{
int tmp = *addr;
*addr += delta;
return tmp;
}
#endif // NCNN_THREADS
class NCNN_EXPORT Allocator
{
public:
virtual ~Allocator();
virtual void* fastMalloc(size_t size) = 0;
virtual void fastFree(void* ptr) = 0;
};
class PoolAllocatorPrivate;
class NCNN_EXPORT PoolAllocator : public Allocator
{
public:
PoolAllocator();
~PoolAllocator();
// ratio range 0 ~ 1
// default cr = 0.75
void set_size_compare_ratio(float scr);
// release all budgets immediately
void clear();
virtual void* fastMalloc(size_t size);
virtual void fastFree(void* ptr);
private:
PoolAllocator(const PoolAllocator&);
PoolAllocator& operator=(const PoolAllocator&);
private:
PoolAllocatorPrivate* const d;
};
class UnlockedPoolAllocatorPrivate;
class NCNN_EXPORT UnlockedPoolAllocator : public Allocator
{
public:
UnlockedPoolAllocator();
~UnlockedPoolAllocator();
// ratio range 0 ~ 1
// default cr = 0.75
void set_size_compare_ratio(float scr);
// release all budgets immediately
void clear();
virtual void* fastMalloc(size_t size);
virtual void fastFree(void* ptr);
private:
UnlockedPoolAllocator(const UnlockedPoolAllocator&);
UnlockedPoolAllocator& operator=(const UnlockedPoolAllocator&);
private:
UnlockedPoolAllocatorPrivate* const d;
};
#if NCNN_VULKAN
class VulkanDevice;
class NCNN_EXPORT VkBufferMemory
{
public:
VkBuffer buffer;
// the base offset assigned by allocator
size_t offset;
size_t capacity;
VkDeviceMemory memory;
void* mapped_ptr;
// buffer state, modified by command functions internally
mutable VkAccessFlags access_flags;
mutable VkPipelineStageFlags stage_flags;
// initialize and modified by mat
int refcount;
};
class NCNN_EXPORT VkImageMemory
{
public:
VkImage image;
VkImageView imageview;
// underlying info assigned by allocator
int width;
int height;
int depth;
VkFormat format;
VkDeviceMemory memory;
void* mapped_ptr;
// the base offset assigned by allocator
size_t bind_offset;
size_t bind_capacity;
// image state, modified by command functions internally
mutable VkAccessFlags access_flags;
mutable VkImageLayout image_layout;
mutable VkPipelineStageFlags stage_flags;
// in-execution state, modified by command functions internally
mutable int command_refcount;
// initialize and modified by mat
int refcount;
};
class NCNN_EXPORT VkAllocator
{
public:
explicit VkAllocator(const VulkanDevice* _vkdev);
virtual ~VkAllocator();
virtual void clear();
virtual VkBufferMemory* fastMalloc(size_t size) = 0;
virtual void fastFree(VkBufferMemory* ptr) = 0;
virtual int flush(VkBufferMemory* ptr);
virtual int invalidate(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack) = 0;
virtual void fastFree(VkImageMemory* ptr) = 0;
public:
const VulkanDevice* vkdev;
uint32_t buffer_memory_type_index;
uint32_t image_memory_type_index;
uint32_t reserved_type_index;
bool mappable;
bool coherent;
protected:
VkBuffer create_buffer(size_t size, VkBufferUsageFlags usage);
VkDeviceMemory allocate_memory(size_t size, uint32_t memory_type_index);
VkDeviceMemory allocate_dedicated_memory(size_t size, uint32_t memory_type_index, VkImage image, VkBuffer buffer);
VkImage create_image(int width, int height, int depth, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage);
VkImageView create_imageview(VkImage image, VkFormat format);
};
class VkBlobAllocatorPrivate;
class NCNN_EXPORT VkBlobAllocator : public VkAllocator
{
public:
explicit VkBlobAllocator(const VulkanDevice* vkdev, size_t preferred_block_size = 16 * 1024 * 1024); // 16M
virtual ~VkBlobAllocator();
public:
// release all budgets immediately
virtual void clear();
virtual VkBufferMemory* fastMalloc(size_t size);
virtual void fastFree(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack);
virtual void fastFree(VkImageMemory* ptr);
private:
VkBlobAllocator(const VkBlobAllocator&);
VkBlobAllocator& operator=(const VkBlobAllocator&);
private:
VkBlobAllocatorPrivate* const d;
};
class VkWeightAllocatorPrivate;
class NCNN_EXPORT VkWeightAllocator : public VkAllocator
{
public:
explicit VkWeightAllocator(const VulkanDevice* vkdev, size_t preferred_block_size = 8 * 1024 * 1024); // 8M
virtual ~VkWeightAllocator();
public:
// release all blocks immediately
virtual void clear();
public:
virtual VkBufferMemory* fastMalloc(size_t size);
virtual void fastFree(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack);
virtual void fastFree(VkImageMemory* ptr);
private:
VkWeightAllocator(const VkWeightAllocator&);
VkWeightAllocator& operator=(const VkWeightAllocator&);
private:
VkWeightAllocatorPrivate* const d;
};
class VkStagingAllocatorPrivate;
class NCNN_EXPORT VkStagingAllocator : public VkAllocator
{
public:
explicit VkStagingAllocator(const VulkanDevice* vkdev);
virtual ~VkStagingAllocator();
public:
// ratio range 0 ~ 1
// default cr = 0.75
void set_size_compare_ratio(float scr);
// release all budgets immediately
virtual void clear();
virtual VkBufferMemory* fastMalloc(size_t size);
virtual void fastFree(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack);
virtual void fastFree(VkImageMemory* ptr);
private:
VkStagingAllocator(const VkStagingAllocator&);
VkStagingAllocator& operator=(const VkStagingAllocator&);
private:
VkStagingAllocatorPrivate* const d;
};
class VkWeightStagingAllocatorPrivate;
class NCNN_EXPORT VkWeightStagingAllocator : public VkAllocator
{
public:
explicit VkWeightStagingAllocator(const VulkanDevice* vkdev);
virtual ~VkWeightStagingAllocator();
public:
virtual VkBufferMemory* fastMalloc(size_t size);
virtual void fastFree(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack);
virtual void fastFree(VkImageMemory* ptr);
private:
VkWeightStagingAllocator(const VkWeightStagingAllocator&);
VkWeightStagingAllocator& operator=(const VkWeightStagingAllocator&);
private:
VkWeightStagingAllocatorPrivate* const d;
};
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 26
class NCNN_EXPORT VkAndroidHardwareBufferImageAllocator : public VkAllocator
{
public:
VkAndroidHardwareBufferImageAllocator(const VulkanDevice* _vkdev, AHardwareBuffer* _hb);
virtual ~VkAndroidHardwareBufferImageAllocator();
public:
virtual VkBufferMemory* fastMalloc(size_t size);
virtual void fastFree(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack);
virtual void fastFree(VkImageMemory* ptr);
private:
VkAndroidHardwareBufferImageAllocator(const VkAndroidHardwareBufferImageAllocator&);
VkAndroidHardwareBufferImageAllocator& operator=(const VkAndroidHardwareBufferImageAllocator&);
public:
int init();
int width() const;
int height() const;
uint64_t external_format() const;
public:
AHardwareBuffer* hb;
AHardwareBuffer_Desc bufferDesc;
VkAndroidHardwareBufferFormatPropertiesANDROID bufferFormatProperties;
VkAndroidHardwareBufferPropertiesANDROID bufferProperties;
VkSamplerYcbcrConversionKHR samplerYcbcrConversion;
};
#endif // __ANDROID_API__ >= 26
#endif // NCNN_PLATFORM_API
#endif // NCNN_VULKAN
} // namespace ncnn
#endif // NCNN_ALLOCATOR_H
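For orientation, a minimal sketch of driving these Vulkan allocators directly (illustrative only, not code from this repository; it assumes an ncnn build with NCNN_VULKAN enabled and at least one usable GPU, and device index 0 is an arbitrary choice):

#include "allocator.h"   // VkBlobAllocator, VkBufferMemory
#include "gpu.h"         // create_gpu_instance, get_gpu_device

int main()
{
    if (ncnn::create_gpu_instance() != 0)
        return -1;

    ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device(0);

    // blob allocator: sub-allocates from large device-local blocks (see preferred_block_size above)
    ncnn::VkBlobAllocator blob_allocator(vkdev);
    ncnn::VkBufferMemory* buf = blob_allocator.fastMalloc(4 * 1024 * 1024);

    // ... record and submit GPU work that uses buf ...

    blob_allocator.fastFree(buf);
    blob_allocator.clear();   // return all blocks/budgets to the device

    ncnn::destroy_gpu_instance();
    return 0;
}

In the normal Net/Extractor path these allocators are obtained through VulkanDevice::acquire_blob_allocator() / acquire_staging_allocator() (declared in gpu.h below); manual construction like this is only needed for custom Vulkan pipelines.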

36
lib_ncnn/ncnn/benchmark.h Executable file
View File

@ -0,0 +1,36 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_BENCHMARK_H
#define NCNN_BENCHMARK_H
#include "layer.h"
#include "mat.h"
#include "platform.h"
namespace ncnn {
// get now timestamp in ms
NCNN_EXPORT double get_current_time();
#if NCNN_BENCHMARK
NCNN_EXPORT void benchmark(const Layer* layer, double start, double end);
NCNN_EXPORT void benchmark(const Layer* layer, const Mat& bottom_blob, Mat& top_blob, double start, double end);
#endif // NCNN_BENCHMARK
} // namespace ncnn
#endif // NCNN_BENCHMARK_H
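A small usage sketch of the millisecond clock declared here (illustrative only):

#include <stdio.h>
#include "benchmark.h"

int main()
{
    const double t0 = ncnn::get_current_time();   // wall-clock timestamp in ms
    // ... run the workload to be measured ...
    const double t1 = ncnn::get_current_time();
    fprintf(stderr, "elapsed %.2f ms\n", t1 - t0);
    return 0;
}

The per-layer benchmark() overloads are only compiled in when NCNN_BENCHMARK is enabled at build time.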

44
lib_ncnn/ncnn/blob.h Executable file
View File

@ -0,0 +1,44 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_BLOB_H
#define NCNN_BLOB_H
#include "mat.h"
#include "platform.h"
namespace ncnn {
class NCNN_EXPORT Blob
{
public:
// empty
Blob();
public:
#if NCNN_STRING
// blob name
std::string name;
#endif // NCNN_STRING
// layer index which produce this blob as output
int producer;
// layer index which need this blob as input
int consumer;
// shape hint
Mat shape;
};
} // namespace ncnn
#endif // NCNN_BLOB_H
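The producer/consumer indices become meaningful once a network is loaded; a hypothetical helper that dumps the wiring might look like this (dump_blobs is not part of the library, and Net::blobs() is declared in net.h further below):

#include <stdio.h>
#include "net.h"

void dump_blobs(const ncnn::Net& net)
{
    const std::vector<ncnn::Blob>& blobs = net.blobs();
    for (size_t i = 0; i < blobs.size(); i++)
    {
        fprintf(stderr, "blob %d: producer layer %d, consumer layer %d\n",
                (int)i, blobs[i].producer, blobs[i].consumer);
    }
}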

320
lib_ncnn/ncnn/c_api.h Executable file
View File

@ -0,0 +1,320 @@
/* Tencent is pleased to support the open source community by making ncnn available.
*
* Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
*
* Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* https://opensource.org/licenses/BSD-3-Clause
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
#ifndef NCNN_C_API_H
#define NCNN_C_API_H
#include "platform.h"
#if NCNN_C_API
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
NCNN_EXPORT const char* ncnn_version();
/* allocator api */
typedef struct __ncnn_allocator_t* ncnn_allocator_t;
struct NCNN_EXPORT __ncnn_allocator_t
{
void* pthis;
void* (*fast_malloc)(ncnn_allocator_t allocator, size_t size);
void (*fast_free)(ncnn_allocator_t allocator, void* ptr);
};
NCNN_EXPORT ncnn_allocator_t ncnn_allocator_create_pool_allocator();
NCNN_EXPORT ncnn_allocator_t ncnn_allocator_create_unlocked_pool_allocator();
NCNN_EXPORT void ncnn_allocator_destroy(ncnn_allocator_t allocator);
/* option api */
typedef struct __ncnn_option_t* ncnn_option_t;
NCNN_EXPORT ncnn_option_t ncnn_option_create();
NCNN_EXPORT void ncnn_option_destroy(ncnn_option_t opt);
NCNN_EXPORT int ncnn_option_get_num_threads(const ncnn_option_t opt);
NCNN_EXPORT void ncnn_option_set_num_threads(ncnn_option_t opt, int num_threads);
NCNN_EXPORT int ncnn_option_get_use_vulkan_compute(const ncnn_option_t opt);
NCNN_EXPORT void ncnn_option_set_use_vulkan_compute(ncnn_option_t opt, int use_vulkan_compute);
/* mat api */
typedef struct __ncnn_mat_t* ncnn_mat_t;
NCNN_EXPORT ncnn_mat_t ncnn_mat_create();
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_1d(int w, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_2d(int w, int h, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_3d(int w, int h, int c, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_4d(int w, int h, int d, int c, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_1d(int w, void* data, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_2d(int w, int h, void* data, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_3d(int w, int h, int c, void* data, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_4d(int w, int h, int d, int c, void* data, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_1d_elem(int w, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_2d_elem(int w, int h, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_3d_elem(int w, int h, int c, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_4d_elem(int w, int h, int d, int c, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_1d_elem(int w, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_2d_elem(int w, int h, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_3d_elem(int w, int h, int c, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_4d_elem(int w, int h, int d, int c, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT void ncnn_mat_destroy(ncnn_mat_t mat);
NCNN_EXPORT void ncnn_mat_fill_float(ncnn_mat_t mat, float v);
NCNN_EXPORT ncnn_mat_t ncnn_mat_clone(const ncnn_mat_t mat, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_1d(const ncnn_mat_t mat, int w, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_2d(const ncnn_mat_t mat, int w, int h, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_3d(const ncnn_mat_t mat, int w, int h, int c, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_4d(const ncnn_mat_t mat, int w, int h, int d, int c, ncnn_allocator_t allocator);
NCNN_EXPORT int ncnn_mat_get_dims(const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_mat_get_w(const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_mat_get_h(const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_mat_get_d(const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_mat_get_c(const ncnn_mat_t mat);
NCNN_EXPORT size_t ncnn_mat_get_elemsize(const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_mat_get_elempack(const ncnn_mat_t mat);
NCNN_EXPORT size_t ncnn_mat_get_cstep(const ncnn_mat_t mat);
NCNN_EXPORT void* ncnn_mat_get_data(const ncnn_mat_t mat);
NCNN_EXPORT void* ncnn_mat_get_channel_data(const ncnn_mat_t mat, int c);
#if NCNN_PIXEL
/* mat pixel api */
#define NCNN_MAT_PIXEL_RGB 1
#define NCNN_MAT_PIXEL_BGR 2
#define NCNN_MAT_PIXEL_GRAY 3
#define NCNN_MAT_PIXEL_RGBA 4
#define NCNN_MAT_PIXEL_BGRA 5
#define NCNN_MAT_PIXEL_X2Y(X, Y) (X | (Y << 16))
NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels(const unsigned char* pixels, int type, int w, int h, int stride, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_resize(const unsigned char* pixels, int type, int w, int h, int stride, int target_width, int target_height, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_roi(const unsigned char* pixels, int type, int w, int h, int stride, int roix, int roiy, int roiw, int roih, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_roi_resize(const unsigned char* pixels, int type, int w, int h, int stride, int roix, int roiy, int roiw, int roih, int target_width, int target_height, ncnn_allocator_t allocator);
NCNN_EXPORT void ncnn_mat_to_pixels(const ncnn_mat_t mat, unsigned char* pixels, int type, int stride);
NCNN_EXPORT void ncnn_mat_to_pixels_resize(const ncnn_mat_t mat, unsigned char* pixels, int type, int target_width, int target_height, int target_stride);
#endif /* NCNN_PIXEL */
NCNN_EXPORT void ncnn_mat_substract_mean_normalize(ncnn_mat_t mat, const float* mean_vals, const float* norm_vals);
NCNN_EXPORT void ncnn_convert_packing(const ncnn_mat_t src, ncnn_mat_t* dst, int elempack, const ncnn_option_t opt);
NCNN_EXPORT void ncnn_flatten(const ncnn_mat_t src, ncnn_mat_t* dst, const ncnn_option_t opt);
/* blob api */
typedef struct __ncnn_blob_t* ncnn_blob_t;
#if NCNN_STRING
NCNN_EXPORT const char* ncnn_blob_get_name(const ncnn_blob_t blob);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_blob_get_producer(const ncnn_blob_t blob);
NCNN_EXPORT int ncnn_blob_get_consumer(const ncnn_blob_t blob);
NCNN_EXPORT void ncnn_blob_get_shape(const ncnn_blob_t blob, int* dims, int* w, int* h, int* c);
/* paramdict api */
typedef struct __ncnn_paramdict_t* ncnn_paramdict_t;
NCNN_EXPORT ncnn_paramdict_t ncnn_paramdict_create();
NCNN_EXPORT void ncnn_paramdict_destroy(ncnn_paramdict_t pd);
NCNN_EXPORT int ncnn_paramdict_get_type(const ncnn_paramdict_t pd, int id);
NCNN_EXPORT int ncnn_paramdict_get_int(const ncnn_paramdict_t pd, int id, int def);
NCNN_EXPORT float ncnn_paramdict_get_float(const ncnn_paramdict_t pd, int id, float def);
NCNN_EXPORT ncnn_mat_t ncnn_paramdict_get_array(const ncnn_paramdict_t pd, int id, const ncnn_mat_t def);
NCNN_EXPORT void ncnn_paramdict_set_int(ncnn_paramdict_t pd, int id, int i);
NCNN_EXPORT void ncnn_paramdict_set_float(ncnn_paramdict_t pd, int id, float f);
NCNN_EXPORT void ncnn_paramdict_set_array(ncnn_paramdict_t pd, int id, const ncnn_mat_t v);
/* datareader api */
typedef struct __ncnn_datareader_t* ncnn_datareader_t;
struct NCNN_EXPORT __ncnn_datareader_t
{
void* pthis;
#if NCNN_STRING
int (*scan)(ncnn_datareader_t dr, const char* format, void* p);
#endif /* NCNN_STRING */
size_t (*read)(ncnn_datareader_t dr, void* buf, size_t size);
};
NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create();
#if NCNN_STDIO
NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create_from_stdio(FILE* fp);
#endif /* NCNN_STDIO */
NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create_from_memory(const unsigned char** mem);
NCNN_EXPORT void ncnn_datareader_destroy(ncnn_datareader_t dr);
/* modelbin api */
typedef struct __ncnn_modelbin_t* ncnn_modelbin_t;
struct NCNN_EXPORT __ncnn_modelbin_t
{
void* pthis;
ncnn_mat_t (*load_1d)(const ncnn_modelbin_t mb, int w, int type);
ncnn_mat_t (*load_2d)(const ncnn_modelbin_t mb, int w, int h, int type);
ncnn_mat_t (*load_3d)(const ncnn_modelbin_t mb, int w, int h, int c, int type);
};
NCNN_EXPORT ncnn_modelbin_t ncnn_modelbin_create_from_datareader(const ncnn_datareader_t dr);
NCNN_EXPORT ncnn_modelbin_t ncnn_modelbin_create_from_mat_array(const ncnn_mat_t* weights, int n);
NCNN_EXPORT void ncnn_modelbin_destroy(ncnn_modelbin_t mb);
/* layer api */
typedef struct __ncnn_layer_t* ncnn_layer_t;
struct NCNN_EXPORT __ncnn_layer_t
{
void* pthis;
int (*load_param)(ncnn_layer_t layer, const ncnn_paramdict_t pd);
int (*load_model)(ncnn_layer_t layer, const ncnn_modelbin_t mb);
int (*create_pipeline)(ncnn_layer_t layer, const ncnn_option_t opt);
int (*destroy_pipeline)(ncnn_layer_t layer, const ncnn_option_t opt);
int (*forward_1)(const ncnn_layer_t layer, const ncnn_mat_t bottom_blob, ncnn_mat_t* top_blob, const ncnn_option_t opt);
int (*forward_n)(const ncnn_layer_t layer, const ncnn_mat_t* bottom_blobs, int n, ncnn_mat_t* top_blobs, int n2, const ncnn_option_t opt);
int (*forward_inplace_1)(const ncnn_layer_t layer, ncnn_mat_t bottom_top_blob, const ncnn_option_t opt);
int (*forward_inplace_n)(const ncnn_layer_t layer, ncnn_mat_t* bottom_top_blobs, int n, const ncnn_option_t opt);
};
NCNN_EXPORT ncnn_layer_t ncnn_layer_create();
NCNN_EXPORT ncnn_layer_t ncnn_layer_create_by_typeindex(int typeindex);
#if NCNN_STRING
NCNN_EXPORT ncnn_layer_t ncnn_layer_create_by_type(const char* type);
#endif /* NCNN_STRING */
NCNN_EXPORT void ncnn_layer_destroy(ncnn_layer_t layer);
#if NCNN_STRING
NCNN_EXPORT const char* ncnn_layer_get_name(const ncnn_layer_t layer);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_layer_get_typeindex(const ncnn_layer_t layer);
#if NCNN_STRING
NCNN_EXPORT const char* ncnn_layer_get_type(const ncnn_layer_t layer);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_layer_get_one_blob_only(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_inplace(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_vulkan(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_packing(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_bf16_storage(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_fp16_storage(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_image_storage(const ncnn_layer_t layer);
NCNN_EXPORT void ncnn_layer_set_one_blob_only(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_inplace(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_vulkan(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_packing(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_bf16_storage(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_fp16_storage(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_image_storage(ncnn_layer_t layer, int enable);
NCNN_EXPORT int ncnn_layer_get_bottom_count(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_bottom(const ncnn_layer_t layer, int i);
NCNN_EXPORT int ncnn_layer_get_top_count(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_top(const ncnn_layer_t layer, int i);
NCNN_EXPORT void ncnn_blob_get_bottom_shape(const ncnn_layer_t layer, int i, int* dims, int* w, int* h, int* c);
NCNN_EXPORT void ncnn_blob_get_top_shape(const ncnn_layer_t layer, int i, int* dims, int* w, int* h, int* c);
/* layer factory function */
typedef ncnn_layer_t (*ncnn_layer_creator_t)(void* userdata);
typedef void (*ncnn_layer_destroyer_t)(ncnn_layer_t layer, void* userdata);
typedef struct __ncnn_net_custom_layer_factory_t* ncnn_net_custom_layer_factory_t;
struct __ncnn_net_custom_layer_factory_t
{
ncnn_layer_creator_t creator;
ncnn_layer_destroyer_t destroyer;
void* userdata;
ncnn_net_custom_layer_factory_t next;
};
/* net api */
typedef struct __ncnn_net_t* ncnn_net_t;
struct __ncnn_net_t
{
void* pthis;
ncnn_net_custom_layer_factory_t custom_layer_factory;
};
NCNN_EXPORT ncnn_net_t ncnn_net_create();
NCNN_EXPORT void ncnn_net_destroy(ncnn_net_t net);
NCNN_EXPORT void ncnn_net_set_option(ncnn_net_t net, ncnn_option_t opt);
#if NCNN_STRING
NCNN_EXPORT void ncnn_net_register_custom_layer_by_type(ncnn_net_t net, const char* type, ncnn_layer_creator_t creator, ncnn_layer_destroyer_t destroyer, void* userdata);
#endif /* NCNN_STRING */
NCNN_EXPORT void ncnn_net_register_custom_layer_by_typeindex(ncnn_net_t net, int typeindex, ncnn_layer_creator_t creator, ncnn_layer_destroyer_t destroyer, void* userdata);
#if NCNN_STDIO
#if NCNN_STRING
NCNN_EXPORT int ncnn_net_load_param(ncnn_net_t net, const char* path);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_net_load_param_bin(ncnn_net_t net, const char* path);
NCNN_EXPORT int ncnn_net_load_model(ncnn_net_t net, const char* path);
#endif /* NCNN_STDIO */
#if NCNN_STDIO
#if NCNN_STRING
NCNN_EXPORT int ncnn_net_load_param_memory(ncnn_net_t net, const char* mem);
#endif /* NCNN_STRING */
#endif /* NCNN_STDIO */
NCNN_EXPORT int ncnn_net_load_param_bin_memory(ncnn_net_t net, const unsigned char* mem);
NCNN_EXPORT int ncnn_net_load_model_memory(ncnn_net_t net, const unsigned char* mem);
#if NCNN_STRING
NCNN_EXPORT int ncnn_net_load_param_datareader(ncnn_net_t net, const ncnn_datareader_t dr);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_net_load_param_bin_datareader(ncnn_net_t net, const ncnn_datareader_t dr);
NCNN_EXPORT int ncnn_net_load_model_datareader(ncnn_net_t net, const ncnn_datareader_t dr);
NCNN_EXPORT void ncnn_net_clear(ncnn_net_t net);
/* extractor api */
typedef struct __ncnn_extractor_t* ncnn_extractor_t;
NCNN_EXPORT ncnn_extractor_t ncnn_extractor_create(ncnn_net_t net);
NCNN_EXPORT void ncnn_extractor_destroy(ncnn_extractor_t ex);
NCNN_EXPORT void ncnn_extractor_set_option(ncnn_extractor_t ex, const ncnn_option_t opt);
#if NCNN_STRING
NCNN_EXPORT int ncnn_extractor_input(ncnn_extractor_t ex, const char* name, const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_extractor_extract(ncnn_extractor_t ex, const char* name, ncnn_mat_t* mat);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_extractor_input_index(ncnn_extractor_t ex, int index, const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_extractor_extract_index(ncnn_extractor_t ex, int index, ncnn_mat_t* mat);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* NCNN_C_API */
#endif /* NCNN_C_API_H */
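An end-to-end sketch of the C API above (illustrative; it assumes a build with NCNN_STRING and NCNN_STDIO enabled, and "model.param" / "model.bin" as well as the blob names "data" / "output" are placeholders for an actual exported model):

#include <stdio.h>
#include "c_api.h"

int main()
{
    printf("ncnn %s\n", ncnn_version());

    ncnn_option_t opt = ncnn_option_create();
    ncnn_option_set_num_threads(opt, 4);

    ncnn_net_t net = ncnn_net_create();
    ncnn_net_set_option(net, opt);
    ncnn_net_load_param(net, "model.param");
    ncnn_net_load_model(net, "model.bin");

    /* NULL allocator = default heap allocation */
    ncnn_mat_t in = ncnn_mat_create_3d(224, 224, 3, NULL);
    ncnn_mat_fill_float(in, 0.5f);

    ncnn_mat_t out = NULL;
    ncnn_extractor_t ex = ncnn_extractor_create(net);
    ncnn_extractor_input(ex, "data", in);
    ncnn_extractor_extract(ex, "output", &out);

    ncnn_mat_destroy(in);
    ncnn_mat_destroy(out);
    ncnn_extractor_destroy(ex);
    ncnn_net_destroy(net);
    ncnn_option_destroy(opt);
    return 0;
}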

136
lib_ncnn/ncnn/command.h Executable file
View File

@ -0,0 +1,136 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_COMMAND_H
#define NCNN_COMMAND_H
#include "platform.h"
#if NCNN_VULKAN
#include "mat.h"
#include <vulkan/vulkan.h>
namespace ncnn {
class Pipeline;
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 26
class ImportAndroidHardwareBufferPipeline;
#endif // __ANDROID_API__ >= 26
#endif // NCNN_PLATFORM_API
class VkComputePrivate;
class NCNN_EXPORT VkCompute
{
public:
explicit VkCompute(const VulkanDevice* vkdev);
virtual ~VkCompute();
public:
void record_upload(const Mat& src, VkMat& dst, const Option& opt);
void record_upload(const Mat& src, VkImageMat& dst, const Option& opt);
void record_download(const VkMat& src, Mat& dst, const Option& opt);
void record_download(const VkImageMat& src, Mat& dst, const Option& opt);
void record_buffer_to_image(const VkMat& src, VkImageMat& dst, const Option& opt);
void record_image_to_buffer(const VkImageMat& src, VkMat& dst, const Option& opt);
void record_clone(const Mat& src, VkMat& dst, const Option& opt);
void record_clone(const Mat& src, VkImageMat& dst, const Option& opt);
void record_clone(const VkMat& src, Mat& dst, const Option& opt);
void record_clone(const VkImageMat& src, Mat& dst, const Option& opt);
void record_clone(const VkMat& src, VkMat& dst, const Option& opt);
void record_clone(const VkImageMat& src, VkImageMat& dst, const Option& opt);
void record_clone(const VkMat& src, VkImageMat& dst, const Option& opt);
void record_clone(const VkImageMat& src, VkMat& dst, const Option& opt);
void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& bindings, const std::vector<vk_constant_type>& constants, const VkMat& dispatcher);
void record_pipeline(const Pipeline* pipeline, const std::vector<VkImageMat>& bindings, const std::vector<vk_constant_type>& constants, const VkImageMat& dispatcher);
void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& buffer_bindings, const std::vector<VkImageMat>& image_bindings, const std::vector<vk_constant_type>& constants, const VkMat& dispatcher);
void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& buffer_bindings, const std::vector<VkImageMat>& image_bindings, const std::vector<vk_constant_type>& constants, const VkImageMat& dispatcher);
void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& buffer_bindings, const std::vector<VkImageMat>& image_bindings, const std::vector<vk_constant_type>& constants, const Mat& dispatcher);
#if NCNN_BENCHMARK
void record_write_timestamp(uint32_t query);
#endif // NCNN_BENCHMARK
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 26
void record_import_android_hardware_buffer(const ImportAndroidHardwareBufferPipeline* pipeline, const VkImageMat& src, const VkMat& dst);
void record_import_android_hardware_buffer(const ImportAndroidHardwareBufferPipeline* pipeline, const VkImageMat& src, const VkImageMat& dst);
#endif // __ANDROID_API__ >= 26
#endif // NCNN_PLATFORM_API
int submit_and_wait();
int reset();
#if NCNN_BENCHMARK
int create_query_pool(uint32_t query_count);
int get_query_pool_results(uint32_t first_query, uint32_t query_count, std::vector<uint64_t>& results);
#endif // NCNN_BENCHMARK
protected:
const VulkanDevice* vkdev;
void barrier_readwrite(const VkMat& binding);
void barrier_readwrite(const VkImageMat& binding);
void barrier_readonly(const VkImageMat& binding);
private:
VkComputePrivate* const d;
};
class VkTransferPrivate;
class NCNN_EXPORT VkTransfer
{
public:
explicit VkTransfer(const VulkanDevice* vkdev);
virtual ~VkTransfer();
public:
void record_upload(const Mat& src, VkMat& dst, const Option& opt, bool flatten = true);
void record_upload(const Mat& src, VkImageMat& dst, const Option& opt);
int submit_and_wait();
protected:
const VulkanDevice* vkdev;
private:
VkTransferPrivate* const d;
};
} // namespace ncnn
#endif // NCNN_VULKAN
#endif // NCNN_COMMAND_H
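A rough sketch of the upload / compute / download flow that VkCompute implements (illustrative; run_on_gpu is not a library function, and it assumes opt already carries the blob/staging Vulkan allocators acquired from the device):

#include "command.h"
#include "gpu.h"
#include "mat.h"

void run_on_gpu(const ncnn::VulkanDevice* vkdev, const ncnn::Mat& in, ncnn::Mat& out, const ncnn::Option& opt)
{
    ncnn::VkCompute cmd(vkdev);

    ncnn::VkMat in_gpu;
    cmd.record_upload(in, in_gpu, opt);        // host -> device

    // ... cmd.record_pipeline(pipeline, bindings, constants, dispatcher) with a prepared ncnn::Pipeline ...

    cmd.record_download(in_gpu, out, opt);     // device -> host
    cmd.submit_and_wait();                     // flush the recorded command buffer and block until done
}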

131
lib_ncnn/ncnn/cpu.h Executable file
View File

@ -0,0 +1,131 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_CPU_H
#define NCNN_CPU_H
#include <stddef.h>
#if defined __ANDROID__ || defined __linux__
#include <sched.h> // cpu_set_t
#endif
#include "platform.h"
namespace ncnn {
class NCNN_EXPORT CpuSet
{
public:
CpuSet();
void enable(int cpu);
void disable(int cpu);
void disable_all();
bool is_enabled(int cpu) const;
int num_enabled() const;
public:
#if defined __ANDROID__ || defined __linux__
cpu_set_t cpu_set;
#endif
#if __APPLE__
unsigned int policy;
#endif
};
// test optional cpu features
// neon = armv7 neon or aarch64 asimd
NCNN_EXPORT int cpu_support_arm_neon();
// vfpv4 = armv7 fp16 + fma
NCNN_EXPORT int cpu_support_arm_vfpv4();
// asimdhp = aarch64 asimd half precision
NCNN_EXPORT int cpu_support_arm_asimdhp();
// asimddp = aarch64 asimd dot product
NCNN_EXPORT int cpu_support_arm_asimddp();
// avx = x86 avx
NCNN_EXPORT int cpu_support_x86_avx();
// fma = x86 fma
NCNN_EXPORT int cpu_support_x86_fma();
// xop = x86 xop
NCNN_EXPORT int cpu_support_x86_xop();
// f16c = x86 f16c
NCNN_EXPORT int cpu_support_x86_f16c();
// avx2 = x86 avx2 + fma + f16c
NCNN_EXPORT int cpu_support_x86_avx2();
// avx_vnni = x86 avx vnni
NCNN_EXPORT int cpu_support_x86_avx_vnni();
// avx512 = x86 avx512f + avx512cd + avx512bw + avx512dq + avx512vl
NCNN_EXPORT int cpu_support_x86_avx512();
// avx512_vnni = x86 avx512 vnni
NCNN_EXPORT int cpu_support_x86_avx512_vnni();
// msa = mips msa
NCNN_EXPORT int cpu_support_mips_msa();
// mmi = loongson mmi
NCNN_EXPORT int cpu_support_loongson_mmi();
// v = riscv vector
NCNN_EXPORT int cpu_support_riscv_v();
// zfh = riscv half-precision float
NCNN_EXPORT int cpu_support_riscv_zfh();
// vlenb = riscv vector length in bytes
NCNN_EXPORT int cpu_riscv_vlenb();
// cpu info
NCNN_EXPORT int get_cpu_count();
NCNN_EXPORT int get_little_cpu_count();
NCNN_EXPORT int get_big_cpu_count();
// bind all threads on little clusters if powersave enabled
// affects HMP arch cpu like ARM big.LITTLE
// only implemented on android at the moment
// switching powersave is expensive and not thread-safe
// 0 = all cores enabled (default)
// 1 = only little clusters enabled
// 2 = only big clusters enabled
// the setter returns 0 on success
NCNN_EXPORT int get_cpu_powersave();
NCNN_EXPORT int set_cpu_powersave(int powersave);
// convenient wrapper
NCNN_EXPORT const CpuSet& get_cpu_thread_affinity_mask(int powersave);
// set explicit thread affinity
NCNN_EXPORT int set_cpu_thread_affinity(const CpuSet& thread_affinity_mask);
// misc function wrapper for openmp routines
NCNN_EXPORT int get_omp_num_threads();
NCNN_EXPORT void set_omp_num_threads(int num_threads);
NCNN_EXPORT int get_omp_dynamic();
NCNN_EXPORT void set_omp_dynamic(int dynamic);
NCNN_EXPORT int get_omp_thread_num();
NCNN_EXPORT int get_kmp_blocktime();
NCNN_EXPORT void set_kmp_blocktime(int time_ms);
// need to flush denormals on Intel chipsets.
// Other architectures such as ARM can be added as needed.
// 0 = DAZ OFF, FTZ OFF
// 1 = DAZ ON , FTZ OFF
// 2 = DAZ OFF, FTZ ON
// 3 = DAZ ON, FTZ ON
NCNN_EXPORT int get_flush_denormals();
NCNN_EXPORT int set_flush_denormals(int flush_denormals);
} // namespace ncnn
#endif // NCNN_CPU_H
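A short sketch of how the query/control functions above are typically combined (the powersave and denormal values follow the comments in this header):

#include <stdio.h>
#include "cpu.h"

int main()
{
    fprintf(stderr, "cpus: %d (big=%d little=%d)\n",
            ncnn::get_cpu_count(), ncnn::get_big_cpu_count(), ncnn::get_little_cpu_count());
    fprintf(stderr, "arm neon=%d  x86 avx2=%d\n",
            ncnn::cpu_support_arm_neon(), ncnn::cpu_support_x86_avx2());

    ncnn::set_cpu_powersave(2);                           // 2 = only big clusters enabled
    ncnn::set_omp_num_threads(ncnn::get_big_cpu_count()); // match thread count to the big cluster

    ncnn::set_flush_denormals(3);                         // 3 = DAZ ON, FTZ ON
    return 0;
}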

122
lib_ncnn/ncnn/datareader.h Executable file
View File

@ -0,0 +1,122 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_DATAREADER_H
#define NCNN_DATAREADER_H
#include "platform.h"
#if NCNN_STDIO
#include <stdio.h>
#endif
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 9
#include <android/asset_manager.h>
#endif
#endif // NCNN_PLATFORM_API
namespace ncnn {
// data read wrapper
class NCNN_EXPORT DataReader
{
public:
DataReader();
virtual ~DataReader();
#if NCNN_STRING
// parse plain param text
// return 1 if scan success
virtual int scan(const char* format, void* p) const;
#endif // NCNN_STRING
// read binary param and model data
// return bytes read
virtual size_t read(void* buf, size_t size) const;
// get model data reference
// return bytes referenced
virtual size_t reference(size_t size, const void** buf) const;
};
#if NCNN_STDIO
class DataReaderFromStdioPrivate;
class NCNN_EXPORT DataReaderFromStdio : public DataReader
{
public:
explicit DataReaderFromStdio(FILE* fp);
virtual ~DataReaderFromStdio();
#if NCNN_STRING
virtual int scan(const char* format, void* p) const;
#endif // NCNN_STRING
virtual size_t read(void* buf, size_t size) const;
private:
DataReaderFromStdio(const DataReaderFromStdio&);
DataReaderFromStdio& operator=(const DataReaderFromStdio&);
private:
DataReaderFromStdioPrivate* const d;
};
#endif // NCNN_STDIO
class DataReaderFromMemoryPrivate;
class NCNN_EXPORT DataReaderFromMemory : public DataReader
{
public:
explicit DataReaderFromMemory(const unsigned char*& mem);
virtual ~DataReaderFromMemory();
#if NCNN_STRING
virtual int scan(const char* format, void* p) const;
#endif // NCNN_STRING
virtual size_t read(void* buf, size_t size) const;
virtual size_t reference(size_t size, const void** buf) const;
private:
DataReaderFromMemory(const DataReaderFromMemory&);
DataReaderFromMemory& operator=(const DataReaderFromMemory&);
private:
DataReaderFromMemoryPrivate* const d;
};
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 9
class DataReaderFromAndroidAssetPrivate;
class NCNN_EXPORT DataReaderFromAndroidAsset : public DataReader
{
public:
explicit DataReaderFromAndroidAsset(AAsset* asset);
virtual ~DataReaderFromAndroidAsset();
#if NCNN_STRING
virtual int scan(const char* format, void* p) const;
#endif // NCNN_STRING
virtual size_t read(void* buf, size_t size) const;
private:
DataReaderFromAndroidAsset(const DataReaderFromAndroidAsset&);
DataReaderFromAndroidAsset& operator=(const DataReaderFromAndroidAsset&);
private:
DataReaderFromAndroidAssetPrivate* const d;
};
#endif // __ANDROID_API__ >= 9
#endif // NCNN_PLATFORM_API
} // namespace ncnn
#endif // NCNN_DATAREADER_H
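As a usage sketch, the memory-backed reader is what Net::load_param / load_model (declared in net.h below) consume when a model ships embedded in the binary; load_from_memory is a hypothetical helper and assumes NCNN_STRING is enabled for the plain-text param path:

#include "datareader.h"
#include "net.h"

int load_from_memory(ncnn::Net& net, const unsigned char* param_data, const unsigned char* bin_data)
{
    const unsigned char* p = param_data;   // the reader advances this pointer as it parses
    ncnn::DataReaderFromMemory param_dr(p);
    if (net.load_param(param_dr) != 0)
        return -1;

    const unsigned char* m = bin_data;
    ncnn::DataReaderFromMemory model_dr(m);
    return net.load_model(model_dr);       // 0 on success
}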

359
lib_ncnn/ncnn/gpu.h Executable file
View File

@ -0,0 +1,359 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_GPU_H
#define NCNN_GPU_H
#include "platform.h"
#if NCNN_VULKAN
#include "mat.h"
#include <vulkan/vulkan.h>
#include "vulkan_header_fix.h"
namespace ncnn {
// instance
NCNN_EXPORT int create_gpu_instance();
NCNN_EXPORT void destroy_gpu_instance();
// instance extension capability
extern int support_VK_KHR_external_memory_capabilities;
extern int support_VK_KHR_get_physical_device_properties2;
extern int support_VK_KHR_get_surface_capabilities2;
extern int support_VK_KHR_surface;
extern int support_VK_EXT_debug_utils;
#if __ANDROID_API__ >= 26
extern int support_VK_KHR_android_surface;
#endif // __ANDROID_API__ >= 26
// VK_KHR_external_memory_capabilities
extern PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR;
// VK_KHR_get_physical_device_properties2
extern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR;
extern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR;
extern PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR;
extern PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR;
extern PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR;
extern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR;
// VK_KHR_get_surface_capabilities2
extern PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR;
extern PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR;
// VK_KHR_surface
extern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR;
extern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR;
extern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR;
extern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR;
extern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR;
#if __ANDROID_API__ >= 26
// VK_KHR_android_surface
extern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR;
#endif // __ANDROID_API__ >= 26
// VK_NV_cooperative_matrix
extern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV;
// get info
NCNN_EXPORT int get_gpu_count();
NCNN_EXPORT int get_default_gpu_index();
class GpuInfoPrivate;
class NCNN_EXPORT GpuInfo
{
public:
explicit GpuInfo();
virtual ~GpuInfo();
// vulkan physical device
VkPhysicalDevice physical_device() const;
// memory properties
const VkPhysicalDeviceMemoryProperties& physical_device_memory_properties() const;
// info
uint32_t api_version() const;
uint32_t driver_version() const;
uint32_t vendor_id() const;
uint32_t device_id() const;
const char* device_name() const;
uint8_t* pipeline_cache_uuid() const;
// 0 = discrete gpu
// 1 = integrated gpu
// 2 = virtual gpu
// 3 = cpu
int type() const;
// hardware limit
uint32_t max_shared_memory_size() const;
uint32_t max_workgroup_count_x() const;
uint32_t max_workgroup_count_y() const;
uint32_t max_workgroup_count_z() const;
uint32_t max_workgroup_invocations() const;
uint32_t max_workgroup_size_x() const;
uint32_t max_workgroup_size_y() const;
uint32_t max_workgroup_size_z() const;
size_t memory_map_alignment() const;
size_t buffer_offset_alignment() const;
size_t non_coherent_atom_size() const;
size_t buffer_image_granularity() const;
uint32_t max_image_dimension_1d() const;
uint32_t max_image_dimension_2d() const;
uint32_t max_image_dimension_3d() const;
float timestamp_period() const;
// runtime
uint32_t compute_queue_family_index() const;
uint32_t graphics_queue_family_index() const;
uint32_t transfer_queue_family_index() const;
uint32_t compute_queue_count() const;
uint32_t graphics_queue_count() const;
uint32_t transfer_queue_count() const;
// property
bool unified_compute_transfer_queue() const;
// subgroup
uint32_t subgroup_size() const;
bool support_subgroup_basic() const;
bool support_subgroup_vote() const;
bool support_subgroup_ballot() const;
bool support_subgroup_shuffle() const;
// bug is not feature
bool bug_storage_buffer_no_l1() const;
bool bug_corrupted_online_pipeline_cache() const;
bool bug_buffer_image_load_zero() const;
// but sometimes bug is a feature
bool bug_implicit_fp16_arithmetic() const;
// fp16 and int8 feature
bool support_fp16_packed() const;
bool support_fp16_storage() const;
bool support_fp16_arithmetic() const;
bool support_int8_packed() const;
bool support_int8_storage() const;
bool support_int8_arithmetic() const;
// ycbcr conversion feature
bool support_ycbcr_conversion() const;
// cooperative matrix feature
bool support_cooperative_matrix() const;
bool support_cooperative_matrix_16_8_8() const;
// extension capability
int support_VK_KHR_8bit_storage() const;
int support_VK_KHR_16bit_storage() const;
int support_VK_KHR_bind_memory2() const;
int support_VK_KHR_create_renderpass2() const;
int support_VK_KHR_dedicated_allocation() const;
int support_VK_KHR_descriptor_update_template() const;
int support_VK_KHR_external_memory() const;
int support_VK_KHR_get_memory_requirements2() const;
int support_VK_KHR_maintenance1() const;
int support_VK_KHR_maintenance2() const;
int support_VK_KHR_maintenance3() const;
int support_VK_KHR_multiview() const;
int support_VK_KHR_push_descriptor() const;
int support_VK_KHR_sampler_ycbcr_conversion() const;
int support_VK_KHR_shader_float16_int8() const;
int support_VK_KHR_shader_float_controls() const;
int support_VK_KHR_storage_buffer_storage_class() const;
int support_VK_KHR_swapchain() const;
int support_VK_EXT_descriptor_indexing() const;
int support_VK_EXT_memory_budget() const;
int support_VK_EXT_queue_family_foreign() const;
#if __ANDROID_API__ >= 26
int support_VK_ANDROID_external_memory_android_hardware_buffer() const;
#endif // __ANDROID_API__ >= 26
int support_VK_NV_cooperative_matrix() const;
private:
GpuInfo(const GpuInfo&);
GpuInfo& operator=(const GpuInfo&);
private:
friend int create_gpu_instance();
GpuInfoPrivate* const d;
};
NCNN_EXPORT const GpuInfo& get_gpu_info(int device_index = get_default_gpu_index());
class VkAllocator;
class VkCompute;
class Option;
class PipelineCache;
class VulkanDevicePrivate;
class NCNN_EXPORT VulkanDevice
{
public:
VulkanDevice(int device_index = get_default_gpu_index());
~VulkanDevice();
const GpuInfo& info;
VkDevice vkdevice() const;
VkShaderModule compile_shader_module(const uint32_t* spv_data, size_t spv_data_size) const;
// with fixed workgroup size
VkShaderModule compile_shader_module(const uint32_t* spv_data, size_t spv_data_size, uint32_t local_size_x, uint32_t local_size_y, uint32_t local_size_z) const;
// helper for creating pipeline
int create_descriptorset_layout(int binding_count, const int* binding_types, VkDescriptorSetLayout* descriptorset_layout) const;
int create_pipeline_layout(int push_constant_count, VkDescriptorSetLayout descriptorset_layout, VkPipelineLayout* pipeline_layout) const;
int create_pipeline(VkShaderModule shader_module, VkPipelineLayout pipeline_layout, const std::vector<vk_specialization_type>& specializations, VkPipeline* pipeline) const;
int create_descriptor_update_template(int binding_count, const int* binding_types, VkDescriptorSetLayout descriptorset_layout, VkPipelineLayout pipeline_layout, VkDescriptorUpdateTemplateKHR* descriptor_update_template) const;
uint32_t find_memory_index(uint32_t memory_type_bits, VkFlags required, VkFlags preferred, VkFlags preferred_not) const;
bool is_mappable(uint32_t memory_type_index) const;
bool is_coherent(uint32_t memory_type_index) const;
VkQueue acquire_queue(uint32_t queue_family_index) const;
void reclaim_queue(uint32_t queue_family_index, VkQueue queue) const;
// allocator on this device
VkAllocator* acquire_blob_allocator() const;
void reclaim_blob_allocator(VkAllocator* allocator) const;
VkAllocator* acquire_staging_allocator() const;
void reclaim_staging_allocator(VkAllocator* allocator) const;
// immutable sampler for texelfetch
const VkSampler* immutable_texelfetch_sampler() const;
// dummy buffer image
VkMat get_dummy_buffer() const;
VkImageMat get_dummy_image() const;
VkImageMat get_dummy_image_readonly() const;
// pipeline cache on this device
const PipelineCache* get_pipeline_cache() const;
// test image allocation
bool shape_support_image_storage(const Mat& shape) const;
// current gpu heap memory budget in MB
uint32_t get_heap_budget() const;
// utility operator
void convert_packing(const VkMat& src, VkMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const;
void convert_packing(const VkImageMat& src, VkImageMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const;
void convert_packing(const VkMat& src, VkImageMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const;
void convert_packing(const VkImageMat& src, VkMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const;
// VK_KHR_bind_memory2
PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
// VK_KHR_create_renderpass2
PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;
PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;
PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;
PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;
// VK_KHR_descriptor_update_template
PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;
PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;
PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;
// VK_KHR_get_memory_requirements2
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR;
// VK_KHR_maintenance1
PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;
// VK_KHR_maintenance3
PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;
// VK_KHR_push_descriptor
PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;
PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;
// VK_KHR_sampler_ycbcr_conversion
PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;
PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;
// VK_KHR_swapchain
PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;
PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;
PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;
PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;
PFN_vkQueuePresentKHR vkQueuePresentKHR;
#if __ANDROID_API__ >= 26
// VK_ANDROID_external_memory_android_hardware_buffer
PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;
PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;
#endif // __ANDROID_API__ >= 26
protected:
// device extension
int init_device_extension();
private:
VulkanDevice(const VulkanDevice&);
VulkanDevice& operator=(const VulkanDevice&);
private:
VulkanDevicePrivate* const d;
};
NCNN_EXPORT VulkanDevice* get_gpu_device(int device_index = get_default_gpu_index());
// online spirv compilation
NCNN_EXPORT int compile_spirv_module(const char* comp_string, const Option& opt, std::vector<uint32_t>& spirv);
NCNN_EXPORT int compile_spirv_module(const char* comp_data, int comp_data_size, const Option& opt, std::vector<uint32_t>& spirv);
NCNN_EXPORT int compile_spirv_module(int shader_type_index, const Option& opt, std::vector<uint32_t>& spirv);
// info from spirv
class NCNN_EXPORT ShaderInfo
{
public:
int specialization_count;
int binding_count;
int push_constant_count;
// 0 = null
// 1 = storage buffer
// 2 = storage image
// 3 = combined image sampler
int binding_types[16]; // 16 is large enough I think ...
int reserved_0;
int reserved_1;
int reserved_2;
int reserved_3;
};
NCNN_EXPORT int resolve_shader_info(const uint32_t* spv_data, size_t spv_data_size, ShaderInfo& shader_info);
} // namespace ncnn
#endif // NCNN_VULKAN
#endif // NCNN_GPU_H
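A small device-enumeration sketch built on the instance/info functions above (illustrative only; a non-zero return from create_gpu_instance is treated here as "no usable Vulkan driver"):

#include <stdio.h>
#include "gpu.h"

int main()
{
    if (ncnn::create_gpu_instance() != 0)
        return -1;

    const int gpu_count = ncnn::get_gpu_count();
    for (int i = 0; i < gpu_count; i++)
    {
        const ncnn::GpuInfo& info = ncnn::get_gpu_info(i);
        fprintf(stderr, "[%d] %s  type=%d  fp16 storage=%d  fp16 arithmetic=%d\n",
                i, info.device_name(), info.type(),
                (int)info.support_fp16_storage(), (int)info.support_fp16_arithmetic());
    }

    ncnn::destroy_gpu_instance();
    return 0;
}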

215
lib_ncnn/ncnn/layer.h Executable file
View File

@ -0,0 +1,215 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_LAYER_H
#define NCNN_LAYER_H
#include "mat.h"
#include "modelbin.h"
#include "option.h"
#include "paramdict.h"
#include "platform.h"
#include <math.h>
#if NCNN_VULKAN
#include "command.h"
#include "pipeline.h"
#include <vulkan/vulkan.h>
#endif // NCNN_VULKAN
namespace ncnn {
class NCNN_EXPORT Layer
{
public:
// empty
Layer();
// virtual destructor
virtual ~Layer();
// load layer specific parameter from parsed dict
// return 0 if success
virtual int load_param(const ParamDict& pd);
// load layer specific weight data from model binary
// return 0 if success
virtual int load_model(const ModelBin& mb);
// layer implementation specific setup
// return 0 if success
virtual int create_pipeline(const Option& opt);
// layer implementation specific clean
// return 0 if success
virtual int destroy_pipeline(const Option& opt);
public:
// one input and one output blob
bool one_blob_only;
// support inplace inference
bool support_inplace;
// support vulkan compute
bool support_vulkan;
// accept input blob with packed storage
bool support_packing;
// accept bf16
bool support_bf16_storage;
// accept fp16
bool support_fp16_storage;
// accept int8
bool support_int8_storage;
// shader image storage
bool support_image_storage;
// shader tensor storage
bool support_tensor_storage;
bool support_reserved_00;
bool support_reserved_0;
bool support_reserved_1;
bool support_reserved_2;
bool support_reserved_3;
bool support_reserved_4;
bool support_reserved_5;
bool support_reserved_6;
bool support_reserved_7;
bool support_reserved_8;
bool support_reserved_9;
bool support_reserved_10;
bool support_reserved_11;
bool support_reserved_12;
bool support_reserved_13;
public:
// implement inference
// return 0 if success
virtual int forward(const std::vector<Mat>& bottom_blobs, std::vector<Mat>& top_blobs, const Option& opt) const;
virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;
// implement inplace inference
// return 0 if success
virtual int forward_inplace(std::vector<Mat>& bottom_top_blobs, const Option& opt) const;
virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;
#if NCNN_VULKAN
public:
// upload weight blob from host to device
virtual int upload_model(VkTransfer& cmd, const Option& opt);
public:
// implement inference
// return 0 if success
virtual int forward(const std::vector<VkMat>& bottom_blobs, std::vector<VkMat>& top_blobs, VkCompute& cmd, const Option& opt) const;
virtual int forward(const VkMat& bottom_blob, VkMat& top_blob, VkCompute& cmd, const Option& opt) const;
// implement inference
// return 0 if success
virtual int forward(const std::vector<VkImageMat>& bottom_blobs, std::vector<VkImageMat>& top_blobs, VkCompute& cmd, const Option& opt) const;
virtual int forward(const VkImageMat& bottom_blob, VkImageMat& top_blob, VkCompute& cmd, const Option& opt) const;
// implement inplace inference
// return 0 if success
virtual int forward_inplace(std::vector<VkMat>& bottom_top_blobs, VkCompute& cmd, const Option& opt) const;
virtual int forward_inplace(VkMat& bottom_top_blob, VkCompute& cmd, const Option& opt) const;
// implement inplace inference
// return 0 if success
virtual int forward_inplace(std::vector<VkImageMat>& bottom_top_blobs, VkCompute& cmd, const Option& opt) const;
virtual int forward_inplace(VkImageMat& bottom_top_blob, VkCompute& cmd, const Option& opt) const;
public:
// assigned immediately after creating this layer
const VulkanDevice* vkdev;
#endif // NCNN_VULKAN
public:
// custom user data
void* userdata;
// layer type index
int typeindex;
#if NCNN_STRING
// layer type name
std::string type;
// layer name
std::string name;
#endif // NCNN_STRING
// blob index which this layer needs as input
std::vector<int> bottoms;
// blob index which this layer produces as output
std::vector<int> tops;
// shape hint
std::vector<Mat> bottom_shapes;
std::vector<Mat> top_shapes;
};
// layer factory function
typedef Layer* (*layer_creator_func)(void*);
typedef void (*layer_destroyer_func)(Layer*, void*);
struct layer_registry_entry
{
#if NCNN_STRING
// layer type name
const char* name;
#endif // NCNN_STRING
// layer factory entry
layer_creator_func creator;
};
struct custom_layer_registry_entry
{
#if NCNN_STRING
// layer type name
const char* name;
#endif // NCNN_STRING
// layer factory entry
layer_creator_func creator;
layer_destroyer_func destroyer;
void* userdata;
};
#if NCNN_STRING
// get layer type from type name
NCNN_EXPORT int layer_to_index(const char* type);
// create layer from type name
NCNN_EXPORT Layer* create_layer(const char* type);
#endif // NCNN_STRING
// create layer from layer type
NCNN_EXPORT Layer* create_layer(int index);
#define DEFINE_LAYER_CREATOR(name) \
::ncnn::Layer* name##_layer_creator(void* /*userdata*/) \
{ \
return new name; \
}
#define DEFINE_LAYER_DESTROYER(name) \
void name##_layer_destroyer(::ncnn::Layer* layer, void* /*userdata*/) \
{ \
delete layer; \
}
} // namespace ncnn
#endif // NCNN_LAYER_H
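A bare-bones custom layer built against this interface, following the registration hooks declared here and in net.h below (MyNegate is a made-up example layer, not something this plugin ships; it assumes fp32 data on the default CPU path):

#include "layer.h"

class MyNegate : public ncnn::Layer
{
public:
    MyNegate()
    {
        one_blob_only = true;     // single input, single output
        support_inplace = true;   // allowed to overwrite the bottom blob
    }

    virtual int forward_inplace(ncnn::Mat& bottom_top_blob, const ncnn::Option& opt) const
    {
        const int channels = bottom_top_blob.c;
        const int size = bottom_top_blob.w * bottom_top_blob.h;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            float* ptr = bottom_top_blob.channel(q);
            for (int i = 0; i < size; i++)
                ptr[i] = -ptr[i];
        }
        return 0;
    }
};

DEFINE_LAYER_CREATOR(MyNegate)

// registered on a Net before load_param():
//   net.register_custom_layer("MyNegate", MyNegate_layer_creator);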

29
lib_ncnn/ncnn/layer_shader_type.h Executable file
View File

@ -0,0 +1,29 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_LAYER_SHADER_TYPE_H
#define NCNN_LAYER_SHADER_TYPE_H
namespace ncnn {
namespace LayerShaderType {
enum LayerShaderType
{
#include "layer_shader_type_enum.h"
};
} // namespace LayerShaderType
} // namespace ncnn
#endif // NCNN_LAYER_SHADER_TYPE_H

5
lib_ncnn/ncnn/layer_shader_type_enum.h Executable file
View File

@ -0,0 +1,5 @@
// Layer Shader Enum header
//
// This file is auto-generated by cmake, don't edit it.

30
lib_ncnn/ncnn/layer_type.h Executable file
View File

@ -0,0 +1,30 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_LAYER_TYPE_H
#define NCNN_LAYER_TYPE_H
namespace ncnn {
namespace LayerType {
enum LayerType
{
#include "layer_type_enum.h"
CustomBit = (1 << 8),
};
} // namespace LayerType
} // namespace ncnn
#endif // NCNN_LAYER_TYPE_H
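The enum values (included from layer_type_enum.h, listed next) pair with create_layer(int) from layer.h; a tiny sketch:

#include "layer.h"
#include "layer_type.h"

void make_builtin_relu()
{
    ncnn::Layer* relu = ncnn::create_layer(ncnn::LayerType::ReLU);
    // ... drive it through load_param() / create_pipeline() / forward_inplace() as needed ...
    delete relu;
}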

98
lib_ncnn/ncnn/layer_type_enum.h Executable file
View File

@ -0,0 +1,98 @@
// Layer Type Enum header
//
// This file is auto-generated by cmake, don't edit it.
AbsVal = 0,
ArgMax = 1,
BatchNorm = 2,
Bias = 3,
BNLL = 4,
Concat = 5,
Convolution = 6,
Crop = 7,
Deconvolution = 8,
Dropout = 9,
Eltwise = 10,
ELU = 11,
Embed = 12,
Exp = 13,
Flatten = 14,
InnerProduct = 15,
Input = 16,
Log = 17,
LRN = 18,
MemoryData = 19,
MVN = 20,
Pooling = 21,
Power = 22,
PReLU = 23,
Proposal = 24,
Reduction = 25,
ReLU = 26,
Reshape = 27,
ROIPooling = 28,
Scale = 29,
Sigmoid = 30,
Slice = 31,
Softmax = 32,
Split = 33,
SPP = 34,
TanH = 35,
Threshold = 36,
Tile = 37,
RNN = 38,
LSTM = 39,
BinaryOp = 40,
UnaryOp = 41,
ConvolutionDepthWise = 42,
Padding = 43,
Squeeze = 44,
ExpandDims = 45,
Normalize = 46,
Permute = 47,
PriorBox = 48,
DetectionOutput = 49,
Interp = 50,
DeconvolutionDepthWise = 51,
ShuffleChannel = 52,
InstanceNorm = 53,
Clip = 54,
Reorg = 55,
YoloDetectionOutput = 56,
Quantize = 57,
Dequantize = 58,
Yolov3DetectionOutput = 59,
PSROIPooling = 60,
ROIAlign = 61,
Packing = 62,
Requantize = 63,
Cast = 64,
HardSigmoid = 65,
SELU = 66,
HardSwish = 67,
Noop = 68,
PixelShuffle = 69,
DeepCopy = 70,
Mish = 71,
StatisticsPooling = 72,
Swish = 73,
Gemm = 74,
GroupNorm = 75,
LayerNorm = 76,
Softplus = 77,
GRU = 78,
MultiHeadAttention = 79,
GELU = 80,
Convolution1D = 81,
Pooling1D = 82,
ConvolutionDepthWise1D = 83,
Convolution3D = 84,
ConvolutionDepthWise3D = 85,
Pooling3D = 86,
MatMul = 87,
Deconvolution1D = 88,
DeconvolutionDepthWise1D = 89,
Deconvolution3D = 90,
DeconvolutionDepthWise3D = 91,
Einsum = 92,

1837
lib_ncnn/ncnn/mat.h Executable file

File diff suppressed because it is too large Load Diff

78
lib_ncnn/ncnn/modelbin.h Executable file
View File

@ -0,0 +1,78 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_MODELBIN_H
#define NCNN_MODELBIN_H
#include "mat.h"
namespace ncnn {
class DataReader;
class NCNN_EXPORT ModelBin
{
public:
ModelBin();
virtual ~ModelBin();
// element type
// 0 = auto
// 1 = float32
// 2 = float16
// 3 = int8
// load vec
virtual Mat load(int w, int type) const = 0;
// load image
virtual Mat load(int w, int h, int type) const;
// load dim
virtual Mat load(int w, int h, int c, int type) const;
};
class ModelBinFromDataReaderPrivate;
class NCNN_EXPORT ModelBinFromDataReader : public ModelBin
{
public:
explicit ModelBinFromDataReader(const DataReader& dr);
virtual ~ModelBinFromDataReader();
virtual Mat load(int w, int type) const;
private:
ModelBinFromDataReader(const ModelBinFromDataReader&);
ModelBinFromDataReader& operator=(const ModelBinFromDataReader&);
private:
ModelBinFromDataReaderPrivate* const d;
};
class ModelBinFromMatArrayPrivate;
class NCNN_EXPORT ModelBinFromMatArray : public ModelBin
{
public:
// construct from weight blob array
explicit ModelBinFromMatArray(const Mat* weights);
virtual ~ModelBinFromMatArray();
virtual Mat load(int w, int type) const;
private:
ModelBinFromMatArray(const ModelBinFromMatArray&);
ModelBinFromMatArray& operator=(const ModelBinFromMatArray&);
private:
ModelBinFromMatArrayPrivate* const d;
};
} // namespace ncnn
#endif // NCNN_MODELBIN_H
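ModelBinFromMatArray is the path for feeding a layer its weights from Mats already in memory; a hypothetical helper (load_weights is not part of the library, and the weights array must follow the order the layer's load_model() expects):

#include "layer.h"
#include "modelbin.h"

int load_weights(ncnn::Layer* layer, const ncnn::Mat* weights)
{
    ncnn::ModelBinFromMatArray mb(weights);
    return layer->load_model(mb);   // 0 on success, per the contract in layer.h
}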

42
lib_ncnn/ncnn/ncnn_export.h Executable file
View File

@ -0,0 +1,42 @@
#ifndef NCNN_EXPORT_H
#define NCNN_EXPORT_H
#ifdef NCNN_STATIC_DEFINE
# define NCNN_EXPORT
# define NCNN_NO_EXPORT
#else
# ifndef NCNN_EXPORT
# ifdef ncnn_EXPORTS
/* We are building this library */
# define NCNN_EXPORT
# else
/* We are using this library */
# define NCNN_EXPORT
# endif
# endif
# ifndef NCNN_NO_EXPORT
# define NCNN_NO_EXPORT
# endif
#endif
#ifndef NCNN_DEPRECATED
# define NCNN_DEPRECATED __attribute__ ((__deprecated__))
#endif
#ifndef NCNN_DEPRECATED_EXPORT
# define NCNN_DEPRECATED_EXPORT NCNN_EXPORT NCNN_DEPRECATED
#endif
#ifndef NCNN_DEPRECATED_NO_EXPORT
# define NCNN_DEPRECATED_NO_EXPORT NCNN_NO_EXPORT NCNN_DEPRECATED
#endif
#if 0 /* DEFINE_NO_DEPRECATED */
# ifndef NCNN_NO_DEPRECATED
# define NCNN_NO_DEPRECATED
# endif
#endif
#endif /* NCNN_EXPORT_H */

272
lib_ncnn/ncnn/net.h Executable file
View File

@ -0,0 +1,272 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_NET_H
#define NCNN_NET_H
#include "blob.h"
#include "layer.h"
#include "mat.h"
#include "option.h"
#include "platform.h"
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 9
#include <android/asset_manager.h>
#endif // __ANDROID_API__ >= 9
#endif // NCNN_PLATFORM_API
namespace ncnn {
#if NCNN_VULKAN
class VkCompute;
#endif // NCNN_VULKAN
class DataReader;
class Extractor;
class NetPrivate;
class NCNN_EXPORT Net
{
public:
// empty init
Net();
// clear and destroy
virtual ~Net();
public:
// option can be changed before loading
Option opt;
#if NCNN_VULKAN
// set gpu device by index
void set_vulkan_device(int device_index);
// set gpu device by device handle, no owner transfer
void set_vulkan_device(const VulkanDevice* vkdev);
const VulkanDevice* vulkan_device() const;
#endif // NCNN_VULKAN
#if NCNN_STRING
// register custom layer by layer type name
// return 0 if success
int register_custom_layer(const char* type, layer_creator_func creator, layer_destroyer_func destroyer = 0, void* userdata = 0);
virtual int custom_layer_to_index(const char* type);
#endif // NCNN_STRING
// register custom layer by layer type
// return 0 if success
int register_custom_layer(int index, layer_creator_func creator, layer_destroyer_func destroyer = 0, void* userdata = 0);
#if NCNN_STRING
int load_param(const DataReader& dr);
#endif // NCNN_STRING
int load_param_bin(const DataReader& dr);
int load_model(const DataReader& dr);
#if NCNN_STDIO
#if NCNN_STRING
// load network structure from plain param file
// return 0 if success
int load_param(FILE* fp);
int load_param(const char* protopath);
int load_param_mem(const char* mem);
#endif // NCNN_STRING
// load network structure from binary param file
// return 0 if success
int load_param_bin(FILE* fp);
int load_param_bin(const char* protopath);
// load network weight data from model file
// return 0 if success
int load_model(FILE* fp);
int load_model(const char* modelpath);
#endif // NCNN_STDIO
// load network structure from external memory
// memory pointer must be 32-bit aligned
// return bytes consumed
int load_param(const unsigned char* mem);
// reference network weight data from external memory
// weight data is not copied but referenced
// so external memory should be retained when used
// memory pointer must be 32-bit aligned
// return bytes consumed
int load_model(const unsigned char* mem);
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 9
#if NCNN_STRING
// convenience overloads: load network structure from an android asset plain param file
int load_param(AAsset* asset);
int load_param(AAssetManager* mgr, const char* assetpath);
#endif // NCNN_STRING
// convenience overloads: load network structure from an android asset binary param file
int load_param_bin(AAsset* asset);
int load_param_bin(AAssetManager* mgr, const char* assetpath);
// convenience overloads: load network weight data from an android asset model file
int load_model(AAsset* asset);
int load_model(AAssetManager* mgr, const char* assetpath);
#endif // __ANDROID_API__ >= 9
#endif // NCNN_PLATFORM_API
// unload network structure and weight data
void clear();
// construct an Extractor from network
Extractor create_extractor() const;
// get input/output indexes/names
const std::vector<int>& input_indexes() const;
const std::vector<int>& output_indexes() const;
#if NCNN_STRING
const std::vector<const char*>& input_names() const;
const std::vector<const char*>& output_names() const;
#endif
const std::vector<Blob>& blobs() const;
const std::vector<Layer*>& layers() const;
std::vector<Blob>& mutable_blobs();
std::vector<Layer*>& mutable_layers();
protected:
friend class Extractor;
#if NCNN_STRING
int find_blob_index_by_name(const char* name) const;
int find_layer_index_by_name(const char* name) const;
virtual Layer* create_custom_layer(const char* type);
#endif // NCNN_STRING
virtual Layer* create_custom_layer(int index);
private:
Net(const Net&);
Net& operator=(const Net&);
private:
NetPrivate* const d;
};
class ExtractorPrivate;
class NCNN_EXPORT Extractor
{
public:
virtual ~Extractor();
// copy
Extractor(const Extractor&);
// assign
Extractor& operator=(const Extractor&);
// clear blob mats and allocators
void clear();
// enable light mode
// intermediate blob will be recycled when enabled
// enabled by default
void set_light_mode(bool enable);
// set thread count for this extractor
// this will overwrite the global setting
// default count is system dependent
void set_num_threads(int num_threads);
// set blob memory allocator
void set_blob_allocator(Allocator* allocator);
// set workspace memory allocator
void set_workspace_allocator(Allocator* allocator);
#if NCNN_VULKAN
void set_vulkan_compute(bool enable);
void set_blob_vkallocator(VkAllocator* allocator);
void set_workspace_vkallocator(VkAllocator* allocator);
void set_staging_vkallocator(VkAllocator* allocator);
#endif // NCNN_VULKAN
#if NCNN_STRING
// set input by blob name
// return 0 if success
int input(const char* blob_name, const Mat& in);
// get result by blob name
// return 0 if success
// type = 0, default
// type = 1, do not convert fp16/bf16 and/or packing
int extract(const char* blob_name, Mat& feat, int type = 0);
#endif // NCNN_STRING
// set input by blob index
// return 0 if success
int input(int blob_index, const Mat& in);
// get result by blob index
// return 0 if success
// type = 0, default
// type = 1, do not convert fp16/bf16 and/or packing
int extract(int blob_index, Mat& feat, int type = 0);
#if NCNN_VULKAN
#if NCNN_STRING
// set input by blob name
// return 0 if success
int input(const char* blob_name, const VkMat& in);
// get result by blob name
// return 0 if success
int extract(const char* blob_name, VkMat& feat, VkCompute& cmd);
// set input by blob name
// return 0 if success
int input(const char* blob_name, const VkImageMat& in);
// get result by blob name
// return 0 if success
int extract(const char* blob_name, VkImageMat& feat, VkCompute& cmd);
#endif // NCNN_STRING
// set input by blob index
// return 0 if success
int input(int blob_index, const VkMat& in);
// get result by blob index
// return 0 if success
int extract(int blob_index, VkMat& feat, VkCompute& cmd);
// set input by blob index
// return 0 if success
int input(int blob_index, const VkImageMat& in);
// get result by blob index
// return 0 if success
int extract(int blob_index, VkImageMat& feat, VkCompute& cmd);
#endif // NCNN_VULKAN
protected:
friend Extractor Net::create_extractor() const;
Extractor(const Net* net, size_t blob_count);
private:
ExtractorPrivate* const d;
};
} // namespace ncnn
#endif // NCNN_NET_H
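Net and Extractor together give the usual load-then-extract flow: load the .param/.bin pair once, then create a lightweight Extractor per inference. A minimal sketch, assuming a packed BGR buffer and placeholder path and blob names ("model.param", "model.bin", "data", "output" are examples, not names from this repo):

#include "net.h"

// bgr points to w x h packed BGR pixels (assumption)
static int run_inference(const unsigned char* bgr, int w, int h)
{
    ncnn::Net net;
    net.opt.num_threads = 4;            // options must be set before loading
    if (net.load_param("model.param"))  // returns 0 on success
        return -1;
    if (net.load_model("model.bin"))
        return -1;

    // resize and convert the pixels into a 224x224 ncnn::Mat (sizes are illustrative)
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr, ncnn::Mat::PIXEL_BGR, w, h, 224, 224);

    ncnn::Extractor ex = net.create_extractor();
    ex.input("data", in);
    ncnn::Mat out;
    ex.extract("output", out);          // runs the forward pass up to this blob
    return 0;
}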

153
lib_ncnn/ncnn/option.h Executable file

@ -0,0 +1,153 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_OPTION_H
#define NCNN_OPTION_H
#include "platform.h"
namespace ncnn {
#if NCNN_VULKAN
class VkAllocator;
class PipelineCache;
#endif // NCNN_VULKAN
class Allocator;
class NCNN_EXPORT Option
{
public:
// default option
Option();
public:
// light mode
// intermediate blob will be recycled when enabled
// enabled by default
bool lightmode;
// thread count
// default value is the one returned by get_cpu_count()
int num_threads;
// blob memory allocator
Allocator* blob_allocator;
// workspace memory allocator
Allocator* workspace_allocator;
#if NCNN_VULKAN
// blob memory allocator
VkAllocator* blob_vkallocator;
// workspace memory allocator
VkAllocator* workspace_vkallocator;
// staging memory allocator
VkAllocator* staging_vkallocator;
// pipeline cache
PipelineCache* pipeline_cache;
#endif // NCNN_VULKAN
// the time openmp threads busy-wait for more work before going to sleep
// default value is 20ms to keep the cores enabled
// without too much extra power consumption afterwards
int openmp_blocktime;
// enable winograd convolution optimization
// improve convolution 3x3 stride1 performance, may consume more memory
// changes should be applied before loading network structure and weight
// enabled by default
bool use_winograd_convolution;
// enable sgemm convolution optimization
// improve convolution 1x1 stride1 performance, may consume more memory
// changes should be applied before loading network structure and weight
// enabled by default
bool use_sgemm_convolution;
// enable quantized int8 inference
// use low-precision int8 path for quantized model
// changes should be applied before loading network structure and weight
// enabled by default
bool use_int8_inference;
// enable vulkan compute
bool use_vulkan_compute;
// enable bf16 data type for storage
// improve most operator performance on all arm devices, may consume more memory
bool use_bf16_storage;
// enable options for gpu inference
bool use_fp16_packed;
bool use_fp16_storage;
bool use_fp16_arithmetic;
bool use_int8_packed;
bool use_int8_storage;
bool use_int8_arithmetic;
// enable simd-friendly packed memory layout
// improve all operator performance on all arm devices, will consume more memory
// changes should be applied before loading network structure and weight
// enabled by default
bool use_packing_layout;
bool use_shader_pack8;
// subgroup option
bool use_subgroup_basic;
bool use_subgroup_vote;
bool use_subgroup_ballot;
bool use_subgroup_shuffle;
// turn on for adreno
bool use_image_storage;
bool use_tensor_storage;
bool use_reserved_0;
// enable DAZ(Denormals-Are-Zero) and FTZ(Flush-To-Zero)
// default value is 3
// 0 = DAZ OFF, FTZ OFF
// 1 = DAZ ON , FTZ OFF
// 2 = DAZ OFF, FTZ ON
// 3 = DAZ ON, FTZ ON
int flush_denormals;
bool use_local_pool_allocator;
// enable local memory optimization for gpu inference
bool use_shader_local_memory;
// enable cooperative matrix optimization for gpu inference
bool use_cooperative_matrix;
// more fine-grained control of winograd convolution
bool use_winograd23_convolution;
bool use_winograd43_convolution;
bool use_winograd63_convolution;
bool use_reserved_6;
bool use_reserved_7;
bool use_reserved_8;
bool use_reserved_9;
bool use_reserved_10;
bool use_reserved_11;
};
} // namespace ncnn
#endif // NCNN_OPTION_H
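Most of these switches, as their comments say, only take effect if set before load_param()/load_model(). A short sketch of tuning them through the Net::opt member declared in net.h (the values are illustrative):

#include "net.h"

static void configure(ncnn::Net& net)
{
    net.opt.lightmode = true;                 // recycle intermediate blobs
    net.opt.num_threads = 2;                  // override the get_cpu_count() default
    net.opt.use_winograd_convolution = true;  // faster 3x3 stride-1 conv, more memory
    net.opt.use_int8_inference = false;       // disable the low-precision int8 path
    net.opt.openmp_blocktime = 0;             // let idle worker threads sleep immediately
    // call net.load_param()/net.load_model() only after this point
}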

73
lib_ncnn/ncnn/paramdict.h Executable file

@ -0,0 +1,73 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_PARAMDICT_H
#define NCNN_PARAMDICT_H
#include "mat.h"
// at most 32 parameters
#define NCNN_MAX_PARAM_COUNT 32
namespace ncnn {
class DataReader;
class Net;
class ParamDictPrivate;
class NCNN_EXPORT ParamDict
{
public:
// empty
ParamDict();
virtual ~ParamDict();
// copy
ParamDict(const ParamDict&);
// assign
ParamDict& operator=(const ParamDict&);
// get type
int type(int id) const;
// get int
int get(int id, int def) const;
// get float
float get(int id, float def) const;
// get array
Mat get(int id, const Mat& def) const;
// set int
void set(int id, int i);
// set float
void set(int id, float f);
// set array
void set(int id, const Mat& v);
protected:
friend class Net;
void clear();
int load_param(const DataReader& dr);
int load_param_bin(const DataReader& dr);
private:
ParamDictPrivate* const d;
};
} // namespace ncnn
#endif // NCNN_PARAMDICT_H
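ParamDict is the per-layer view of the .param file: small integer ids map to int, float or array values, each with a caller-supplied default. A sketch of the matching load_param() for the same hypothetical MyLayer used above (the id assignments are illustrative):

// parameter-loading sketch for the hypothetical custom layer MyLayer
int MyLayer::load_param(const ncnn::ParamDict& pd)
{
    num_output = pd.get(0, 0);        // int value stored under id 0, default 0
    eps = pd.get(1, 1e-4f);           // float value under id 1, default 1e-4
    weight_data_size = pd.get(2, 0);  // consumed later by load_model()
    return 0;
}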

113
lib_ncnn/ncnn/pipeline.h Executable file

@ -0,0 +1,113 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_PIPELINE_H
#define NCNN_PIPELINE_H
#include "mat.h"
#include "platform.h"
#if NCNN_VULKAN
#include "gpu.h"
#include <vulkan/vulkan.h>
#endif // NCNN_VULKAN
namespace ncnn {
#if NCNN_VULKAN
class Option;
class PipelinePrivate;
class NCNN_EXPORT Pipeline
{
public:
explicit Pipeline(const VulkanDevice* vkdev);
virtual ~Pipeline();
public:
void set_optimal_local_size_xyz(int w = 4, int h = 4, int c = 4);
void set_optimal_local_size_xyz(const Mat& local_size_xyz);
void set_local_size_xyz(int w, int h, int c);
int create(const uint32_t* spv_data, size_t spv_data_size, const std::vector<vk_specialization_type>& specializations);
int create(int shader_type_index, const Option& opt, const std::vector<vk_specialization_type>& specializations);
public:
VkShaderModule shader_module() const;
VkDescriptorSetLayout descriptorset_layout() const;
VkPipelineLayout pipeline_layout() const;
VkPipeline pipeline() const;
VkDescriptorUpdateTemplateKHR descriptor_update_template() const;
const ShaderInfo& shader_info() const;
uint32_t local_size_x() const;
uint32_t local_size_y() const;
uint32_t local_size_z() const;
protected:
void set_shader_module(VkShaderModule shader_module);
void set_descriptorset_layout(VkDescriptorSetLayout descriptorset_layout);
void set_pipeline_layout(VkPipelineLayout pipeline_layout);
void set_pipeline(VkPipeline pipeline);
void set_descriptor_update_template(VkDescriptorUpdateTemplateKHR descriptor_update_template);
void set_shader_info(const ShaderInfo& shader_info);
public:
const VulkanDevice* vkdev;
private:
Pipeline(const Pipeline&);
Pipeline& operator=(const Pipeline&);
private:
PipelinePrivate* const d;
};
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 26
class VkCompute;
class NCNN_EXPORT ImportAndroidHardwareBufferPipeline : private Pipeline
{
public:
explicit ImportAndroidHardwareBufferPipeline(const VulkanDevice* vkdev);
virtual ~ImportAndroidHardwareBufferPipeline();
int create(VkAndroidHardwareBufferImageAllocator* ahb_im_allocator, int type_to, int rotate_from, const Option& opt);
int create(VkAndroidHardwareBufferImageAllocator* ahb_im_allocator, int type_to, int rotate_from, int target_width, int target_height, const Option& opt);
void destroy();
friend class VkCompute;
protected:
int create_shader_module(const Option& opt);
int create_sampler(VkAndroidHardwareBufferImageAllocator* ahb_im_allocator);
int create_descriptorset_layout();
public:
int type_to;
int rotate_from;
bool need_resize;
VkSampler sampler;
};
#endif // __ANDROID_API__ >= 26
#endif // NCNN_PLATFORM_API
#endif // NCNN_VULKAN
} // namespace ncnn
#endif // NCNN_PIPELINE_H

85
lib_ncnn/ncnn/pipelinecache.h Executable file

@ -0,0 +1,85 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_PIPELINECACHE_H
#define NCNN_PIPELINECACHE_H
#include "platform.h"
#if NCNN_VULKAN
#include <vulkan/vulkan.h>
#endif // NCNN_VULKAN
#include "mat.h"
#include "gpu.h"
namespace ncnn {
#if NCNN_VULKAN
class VulkanDevice;
class PipelineCachePrivate;
class NCNN_EXPORT PipelineCache
{
public:
explicit PipelineCache(const VulkanDevice* _vkdev);
virtual ~PipelineCache();
void clear();
int get_pipeline(const uint32_t* spv_data, size_t spv_data_size, const std::vector<vk_specialization_type>& specializations,
uint32_t local_size_x, uint32_t local_size_y, uint32_t local_size_z,
VkShaderModule* shader_module,
VkDescriptorSetLayout* descriptorset_layout,
VkPipelineLayout* pipeline_layout,
VkPipeline* pipeline,
VkDescriptorUpdateTemplateKHR* descriptor_update_template,
ShaderInfo& shader_info) const;
int get_pipeline(int shader_type_index, const Option& opt, const std::vector<vk_specialization_type>& specializations,
uint32_t local_size_x, uint32_t local_size_y, uint32_t local_size_z,
VkShaderModule* shader_module,
VkDescriptorSetLayout* descriptorset_layout,
VkPipelineLayout* pipeline_layout,
VkPipeline* pipeline,
VkDescriptorUpdateTemplateKHR* descriptor_update_template,
ShaderInfo& shader_info) const;
protected:
int create_shader_module(int shader_type_index, const Option& opt, uint32_t local_size_x, uint32_t local_size_y, uint32_t local_size_z,
VkShaderModule* _shader_module, ShaderInfo& si) const;
int new_pipeline(VkShaderModule shader_module, const ShaderInfo& shader_info, const std::vector<vk_specialization_type>& specializations,
VkDescriptorSetLayout* descriptorset_layout,
VkPipelineLayout* pipeline_layout,
VkPipeline* pipeline,
VkDescriptorUpdateTemplateKHR* descriptor_update_template) const;
protected:
const VulkanDevice* vkdev;
private:
PipelineCache(const PipelineCache&);
PipelineCache& operator=(const PipelineCache&);
private:
PipelineCachePrivate* const d;
};
#endif // NCNN_VULKAN
} // namespace ncnn
#endif // NCNN_PIPELINECACHE_H

273
lib_ncnn/ncnn/platform.h Executable file

@ -0,0 +1,273 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_PLATFORM_H
#define NCNN_PLATFORM_H
#define NCNN_STDIO 1
#define NCNN_STRING 1
#define NCNN_SIMPLEOCV 0
#define NCNN_SIMPLEOMP 0
#define NCNN_SIMPLESTL 0
#define NCNN_THREADS 1
#define NCNN_BENCHMARK 0
#define NCNN_C_API 1
#define NCNN_PLATFORM_API 1
#define NCNN_PIXEL 1
#define NCNN_PIXEL_ROTATE 1
#define NCNN_PIXEL_AFFINE 1
#define NCNN_PIXEL_DRAWING 1
#define NCNN_VULKAN 0
#define NCNN_SYSTEM_GLSLANG 0
#define NCNN_RUNTIME_CPU 1
#define NCNN_AVX 1
#define NCNN_XOP 1
#define NCNN_FMA 1
#define NCNN_F16C 1
#define NCNN_AVX2 1
#define NCNN_AVXVNNI 0
#define NCNN_AVX512 1
#define NCNN_AVX512VNNI 1
#if __aarch64__
#define NCNN_ARM82 0
#define NCNN_ARM82DOT 0
#endif // __aarch64__
#define NCNN_MSA 0
#define NCNN_MMI 0
#define NCNN_RVV 0
#define NCNN_INT8 1
#define NCNN_BF16 1
#define NCNN_FORCE_INLINE 1
#define NCNN_VERSION_STRING "1.0.20220617"
#include "ncnn_export.h"
#ifdef __cplusplus
#if NCNN_THREADS
#if (defined _WIN32 && !(defined __MINGW32__))
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <process.h>
#else
#include <pthread.h>
#endif
#endif // NCNN_THREADS
#if __ANDROID_API__ >= 26
#define VK_USE_PLATFORM_ANDROID_KHR
#endif // __ANDROID_API__ >= 26
namespace ncnn {
#if NCNN_THREADS
#if (defined _WIN32 && !(defined __MINGW32__))
class NCNN_EXPORT Mutex
{
public:
Mutex() { InitializeSRWLock(&srwlock); }
~Mutex() {}
void lock() { AcquireSRWLockExclusive(&srwlock); }
void unlock() { ReleaseSRWLockExclusive(&srwlock); }
private:
friend class ConditionVariable;
// NOTE SRWLock is available from Windows Vista onwards
SRWLOCK srwlock;
};
class NCNN_EXPORT ConditionVariable
{
public:
ConditionVariable() { InitializeConditionVariable(&condvar); }
~ConditionVariable() {}
void wait(Mutex& mutex) { SleepConditionVariableSRW(&condvar, &mutex.srwlock, INFINITE, 0); }
void broadcast() { WakeAllConditionVariable(&condvar); }
void signal() { WakeConditionVariable(&condvar); }
private:
CONDITION_VARIABLE condvar;
};
static unsigned __stdcall start_wrapper(void* args);
class NCNN_EXPORT Thread
{
public:
Thread(void* (*start)(void*), void* args = 0) { _start = start; _args = args; handle = (HANDLE)_beginthreadex(0, 0, start_wrapper, this, 0, 0); }
~Thread() {}
void join() { WaitForSingleObject(handle, INFINITE); CloseHandle(handle); }
private:
friend unsigned __stdcall start_wrapper(void* args)
{
Thread* t = (Thread*)args;
t->_start(t->_args);
return 0;
}
HANDLE handle;
void* (*_start)(void*);
void* _args;
};
class NCNN_EXPORT ThreadLocalStorage
{
public:
ThreadLocalStorage() { key = TlsAlloc(); }
~ThreadLocalStorage() { TlsFree(key); }
void set(void* value) { TlsSetValue(key, (LPVOID)value); }
void* get() { return (void*)TlsGetValue(key); }
private:
DWORD key;
};
#else // (defined _WIN32 && !(defined __MINGW32__))
class NCNN_EXPORT Mutex
{
public:
Mutex() { pthread_mutex_init(&mutex, 0); }
~Mutex() { pthread_mutex_destroy(&mutex); }
void lock() { pthread_mutex_lock(&mutex); }
void unlock() { pthread_mutex_unlock(&mutex); }
private:
friend class ConditionVariable;
pthread_mutex_t mutex;
};
class NCNN_EXPORT ConditionVariable
{
public:
ConditionVariable() { pthread_cond_init(&cond, 0); }
~ConditionVariable() { pthread_cond_destroy(&cond); }
void wait(Mutex& mutex) { pthread_cond_wait(&cond, &mutex.mutex); }
void broadcast() { pthread_cond_broadcast(&cond); }
void signal() { pthread_cond_signal(&cond); }
private:
pthread_cond_t cond;
};
class NCNN_EXPORT Thread
{
public:
Thread(void* (*start)(void*), void* args = 0) { pthread_create(&t, 0, start, args); }
~Thread() {}
void join() { pthread_join(t, 0); }
private:
pthread_t t;
};
class NCNN_EXPORT ThreadLocalStorage
{
public:
ThreadLocalStorage() { pthread_key_create(&key, 0); }
~ThreadLocalStorage() { pthread_key_delete(key); }
void set(void* value) { pthread_setspecific(key, value); }
void* get() { return pthread_getspecific(key); }
private:
pthread_key_t key;
};
#endif // (defined _WIN32 && !(defined __MINGW32__))
#else // NCNN_THREADS
class NCNN_EXPORT Mutex
{
public:
Mutex() {}
~Mutex() {}
void lock() {}
void unlock() {}
};
class NCNN_EXPORT ConditionVariable
{
public:
ConditionVariable() {}
~ConditionVariable() {}
void wait(Mutex& /*mutex*/) {}
void broadcast() {}
void signal() {}
};
class NCNN_EXPORT Thread
{
public:
Thread(void* (*/*start*/)(void*), void* /*args*/ = 0) {}
~Thread() {}
void join() {}
};
class NCNN_EXPORT ThreadLocalStorage
{
public:
ThreadLocalStorage() { data = 0; }
~ThreadLocalStorage() {}
void set(void* value) { data = value; }
void* get() { return data; }
private:
void* data;
};
#endif // NCNN_THREADS
class NCNN_EXPORT MutexLockGuard
{
public:
MutexLockGuard(Mutex& _mutex) : mutex(_mutex) { mutex.lock(); }
~MutexLockGuard() { mutex.unlock(); }
private:
Mutex& mutex;
};
} // namespace ncnn
#if NCNN_SIMPLESTL
#include "simplestl.h"
#else
#include <algorithm>
#include <list>
#include <vector>
#include <string>
#endif
#endif // __cplusplus
#if NCNN_STDIO
#if NCNN_PLATFORM_API && __ANDROID_API__ >= 8
#include <android/log.h>
#define NCNN_LOGE(...) do { \
fprintf(stderr, ##__VA_ARGS__); fprintf(stderr, "\n"); \
__android_log_print(ANDROID_LOG_WARN, "ncnn", ##__VA_ARGS__); } while(0)
#else // NCNN_PLATFORM_API && __ANDROID_API__ >= 8
#include <stdio.h>
#define NCNN_LOGE(...) do { \
fprintf(stderr, ##__VA_ARGS__); fprintf(stderr, "\n"); } while(0)
#endif // NCNN_PLATFORM_API && __ANDROID_API__ >= 8
#else
#define NCNN_LOGE(...)
#endif
#if NCNN_FORCE_INLINE
#ifdef _MSC_VER
#define NCNN_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define NCNN_FORCEINLINE inline __attribute__((__always_inline__))
#elif defined(__CLANG__)
#if __has_attribute(__always_inline__)
#define NCNN_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define NCNN_FORCEINLINE inline
#endif
#else
#define NCNN_FORCEINLINE inline
#endif
#else
#define NCNN_FORCEINLINE inline
#endif
#endif // NCNN_PLATFORM_H
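The threading shims above present one Mutex/ConditionVariable/Thread/ThreadLocalStorage API across the Win32, pthread and no-threads builds, and MutexLockGuard is the RAII helper for short critical sections. A minimal sketch:

#include "platform.h"

static ncnn::Mutex g_counter_lock;
static int g_counter = 0;

static void bump_counter()
{
    ncnn::MutexLockGuard guard(g_counter_lock); // locks here, unlocks when guard leaves scope
    g_counter++;
}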

501
lib_ncnn/ncnn/simpleocv.h Executable file

@ -0,0 +1,501 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_SIMPLEOCV_H
#define NCNN_SIMPLEOCV_H
#include "platform.h"
#if NCNN_SIMPLEOCV
#include <limits.h>
#include <string.h>
#include "allocator.h"
#include "mat.h"
#if defined(_MSC_VER) || defined(__GNUC__)
#pragma push_macro("min")
#pragma push_macro("max")
#undef min
#undef max
#endif
#ifndef NCNN_XADD
using ncnn::NCNN_XADD;
#endif
typedef unsigned char uchar;
typedef unsigned short ushort;
typedef unsigned int uint;
enum
{
CV_LOAD_IMAGE_UNCHANGED = -1,
CV_LOAD_IMAGE_GRAYSCALE = 0,
CV_LOAD_IMAGE_COLOR = 1,
};
enum
{
CV_IMWRITE_JPEG_QUALITY = 1
};
// minimal opencv style data structure implementation
namespace cv {
template<typename _Tp>
static inline _Tp saturate_cast(int v)
{
return _Tp(v);
}
template<>
inline uchar saturate_cast<uchar>(int v)
{
return (uchar)((unsigned)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0);
}
template<typename _Tp>
struct Scalar_
{
Scalar_()
{
v[0] = 0;
v[1] = 0;
v[2] = 0;
v[3] = 0;
}
Scalar_(_Tp _v0)
{
v[0] = _v0;
v[1] = 0;
v[2] = 0;
v[3] = 0;
}
Scalar_(_Tp _v0, _Tp _v1, _Tp _v2)
{
v[0] = _v0;
v[1] = _v1;
v[2] = _v2;
v[3] = 0;
}
Scalar_(_Tp _v0, _Tp _v1, _Tp _v2, _Tp _v3)
{
v[0] = _v0;
v[1] = _v1;
v[2] = _v2;
v[3] = _v3;
}
const _Tp operator[](const int i) const
{
return v[i];
}
_Tp operator[](const int i)
{
return v[i];
}
_Tp v[4];
};
typedef Scalar_<uchar> Scalar;
template<typename _Tp>
struct Point_
{
Point_()
: x(0), y(0)
{
}
Point_(_Tp _x, _Tp _y)
: x(_x), y(_y)
{
}
template<typename _Tp2>
operator Point_<_Tp2>() const
{
return Point_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y));
}
_Tp x;
_Tp y;
};
typedef Point_<int> Point;
typedef Point_<float> Point2f;
template<typename _Tp>
struct Size_
{
Size_()
: width(0), height(0)
{
}
Size_(_Tp _w, _Tp _h)
: width(_w), height(_h)
{
}
template<typename _Tp2>
operator Size_<_Tp2>() const
{
return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height));
}
_Tp width;
_Tp height;
};
typedef Size_<int> Size;
typedef Size_<float> Size2f;
template<typename _Tp>
struct Rect_
{
Rect_()
: x(0), y(0), width(0), height(0)
{
}
Rect_(_Tp _x, _Tp _y, _Tp _w, _Tp _h)
: x(_x), y(_y), width(_w), height(_h)
{
}
Rect_(Point_<_Tp> _p, Size_<_Tp> _size)
: x(_p.x), y(_p.y), width(_size.width), height(_size.height)
{
}
template<typename _Tp2>
operator Rect_<_Tp2>() const
{
return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height));
}
_Tp x;
_Tp y;
_Tp width;
_Tp height;
// area
_Tp area() const
{
return width * height;
}
};
template<typename _Tp>
static inline Rect_<_Tp>& operator&=(Rect_<_Tp>& a, const Rect_<_Tp>& b)
{
_Tp x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y);
a.width = std::min(a.x + a.width, b.x + b.width) - x1;
a.height = std::min(a.y + a.height, b.y + b.height) - y1;
a.x = x1;
a.y = y1;
if (a.width <= 0 || a.height <= 0)
a = Rect_<_Tp>();
return a;
}
template<typename _Tp>
static inline Rect_<_Tp>& operator|=(Rect_<_Tp>& a, const Rect_<_Tp>& b)
{
_Tp x1 = std::min(a.x, b.x), y1 = std::min(a.y, b.y);
a.width = std::max(a.x + a.width, b.x + b.width) - x1;
a.height = std::max(a.y + a.height, b.y + b.height) - y1;
a.x = x1;
a.y = y1;
return a;
}
template<typename _Tp>
static inline Rect_<_Tp> operator&(const Rect_<_Tp>& a, const Rect_<_Tp>& b)
{
Rect_<_Tp> c = a;
return c &= b;
}
template<typename _Tp>
static inline Rect_<_Tp> operator|(const Rect_<_Tp>& a, const Rect_<_Tp>& b)
{
Rect_<_Tp> c = a;
return c |= b;
}
typedef Rect_<int> Rect;
typedef Rect_<float> Rect2f;
#define CV_8UC1 1
#define CV_8UC3 3
#define CV_8UC4 4
#define CV_32FC1 4
struct NCNN_EXPORT Mat
{
Mat()
: data(0), refcount(0), rows(0), cols(0), c(0)
{
}
Mat(int _rows, int _cols, int flags)
: data(0), refcount(0)
{
create(_rows, _cols, flags);
}
// copy
Mat(const Mat& m)
: data(m.data), refcount(m.refcount)
{
if (refcount)
NCNN_XADD(refcount, 1);
rows = m.rows;
cols = m.cols;
c = m.c;
}
Mat(int _rows, int _cols, int flags, void* _data)
: data((unsigned char*)_data), refcount(0)
{
rows = _rows;
cols = _cols;
c = flags;
}
~Mat()
{
release();
}
// assign
Mat& operator=(const Mat& m)
{
if (this == &m)
return *this;
if (m.refcount)
NCNN_XADD(m.refcount, 1);
release();
data = m.data;
refcount = m.refcount;
rows = m.rows;
cols = m.cols;
c = m.c;
return *this;
}
Mat& operator=(const Scalar& s)
{
if (total() > 0)
{
uchar* p = data;
for (int i = 0; i < cols * rows; i++)
{
for (int j = 0; j < c; j++)
{
*p++ = s[j];
}
}
}
return *this;
}
void create(int _rows, int _cols, int flags)
{
release();
rows = _rows;
cols = _cols;
c = flags;
if (total() > 0)
{
// refcount address must be aligned, so we expand totalsize here
size_t totalsize = (total() + 3) >> 2 << 2;
data = (uchar*)ncnn::fastMalloc(totalsize + (int)sizeof(*refcount));
refcount = (int*)(((uchar*)data) + totalsize);
*refcount = 1;
}
}
void release()
{
if (refcount && NCNN_XADD(refcount, -1) == 1)
ncnn::fastFree(data);
data = 0;
rows = 0;
cols = 0;
c = 0;
refcount = 0;
}
Mat clone() const
{
if (empty())
return Mat();
Mat m(rows, cols, c);
if (total() > 0)
{
memcpy(m.data, data, total());
}
return m;
}
bool empty() const
{
return data == 0 || total() == 0;
}
int channels() const
{
return c;
}
int type() const
{
return c;
}
size_t total() const
{
return cols * rows * c;
}
const uchar* ptr(int y) const
{
return data + y * cols * c;
}
uchar* ptr(int y)
{
return data + y * cols * c;
}
template<typename _Tp>
const _Tp* ptr(int y) const
{
return (const _Tp*)data + y * cols * c;
}
template<typename _Tp>
_Tp* ptr(int y)
{
return (_Tp*)data + y * cols * c;
}
// roi
Mat operator()(const Rect& roi) const
{
if (empty())
return Mat();
Mat m(roi.height, roi.width, c);
int sy = roi.y;
for (int y = 0; y < roi.height; y++)
{
const uchar* sptr = ptr(sy) + roi.x * c;
uchar* dptr = m.ptr(y);
memcpy(dptr, sptr, roi.width * c);
sy++;
}
return m;
}
uchar* data;
// pointer to the reference counter;
// when it points to user-allocated data, the pointer is NULL
int* refcount;
int rows;
int cols;
int c;
};
enum ImreadModes
{
IMREAD_UNCHANGED = -1,
IMREAD_GRAYSCALE = 0,
IMREAD_COLOR = 1
};
NCNN_EXPORT Mat imread(const std::string& path, int flags = IMREAD_COLOR);
enum ImwriteFlags
{
IMWRITE_JPEG_QUALITY = 1
};
NCNN_EXPORT bool imwrite(const std::string& path, const Mat& m, const std::vector<int>& params = std::vector<int>());
NCNN_EXPORT void imshow(const std::string& name, const Mat& m);
NCNN_EXPORT int waitKey(int delay = 0);
#if NCNN_PIXEL
NCNN_EXPORT void resize(const Mat& src, Mat& dst, const Size& size, float sw = 0.f, float sh = 0.f, int flags = 0);
#endif // NCNN_PIXEL
#if NCNN_PIXEL_DRAWING
enum
{
FILLED = -1
};
NCNN_EXPORT void rectangle(Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness = 1);
NCNN_EXPORT void rectangle(Mat& img, Rect rec, const Scalar& color, int thickness = 1);
NCNN_EXPORT void circle(Mat& img, Point center, int radius, const Scalar& color, int thickness = 1);
NCNN_EXPORT void line(Mat& img, Point p0, Point p1, const Scalar& color, int thickness = 1);
enum
{
FONT_HERSHEY_SIMPLEX = 0
};
NCNN_EXPORT void putText(Mat& img, const std::string& text, Point org, int fontFace, double fontScale, Scalar color, int thickness = 1);
NCNN_EXPORT Size getTextSize(const std::string& text, int fontFace, double fontScale, int thickness, int* baseLine);
#endif // NCNN_PIXEL_DRAWING
} // namespace cv
#if defined(_MSC_VER) || defined(__GNUC__)
#pragma pop_macro("min")
#pragma pop_macro("max")
#endif
#endif // NCNN_SIMPLEOCV
#endif // NCNN_SIMPLEOCV_H
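simpleocv only comes into play when ncnn is built with NCNN_SIMPLEOCV enabled (it is 0 in the platform.h above); it then supplies the small cv:: subset the examples rely on. A sketch with placeholder file names, assuming such a build:

#include "simpleocv.h"

static void annotate()
{
    cv::Mat img = cv::imread("input.jpg", cv::IMREAD_COLOR); // placeholder path
    if (img.empty())
        return;
    cv::rectangle(img, cv::Rect(10, 10, 100, 50), cv::Scalar(0, 255, 0), 2);
    cv::imwrite("annotated.jpg", img);
}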

53
lib_ncnn/ncnn/simpleomp.h Executable file

@ -0,0 +1,53 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_SIMPLEOMP_H
#define NCNN_SIMPLEOMP_H
#include "platform.h"
#if NCNN_SIMPLEOMP
#include <stdint.h>
// This minimal openmp runtime implementation only supports the llvm openmp abi
// and only supports #pragma omp parallel for num_threads(X)
#ifdef __cplusplus
extern "C" {
#endif
NCNN_EXPORT int omp_get_max_threads();
NCNN_EXPORT void omp_set_num_threads(int num_threads);
NCNN_EXPORT int omp_get_dynamic();
NCNN_EXPORT void omp_set_dynamic(int dynamic);
NCNN_EXPORT int omp_get_num_threads();
NCNN_EXPORT int omp_get_thread_num();
NCNN_EXPORT int kmp_get_blocktime();
NCNN_EXPORT void kmp_set_blocktime(int blocktime);
#ifdef __cplusplus
}
#endif
#endif // NCNN_SIMPLEOMP
#endif // NCNN_SIMPLEOMP_H
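As the comment in this header says, the bundled OpenMP runtime implements only the LLVM OpenMP ABI and only the plain parallel-for form, which is all ncnn needs. A sketch of that one construct (only meaningful in a build with NCNN_SIMPLEOMP enabled, which this one is not):

#include "simpleomp.h"

static void scale_in_place(float* data, int n, float s)
{
    // each iteration is independent, so a plain parallel-for is sufficient
    #pragma omp parallel for num_threads(4)
    for (int i = 0; i < n; i++)
    {
        data[i] *= s;
    }
}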

565
lib_ncnn/ncnn/simplestl.h Executable file

@ -0,0 +1,565 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_SIMPLESTL_H
#define NCNN_SIMPLESTL_H
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#if !NCNN_SIMPLESTL
#include <new>
#else
// allocation functions
NCNN_EXPORT void* operator new(size_t size);
NCNN_EXPORT void* operator new[](size_t size);
// placement allocation functions
NCNN_EXPORT void* operator new(size_t size, void* ptr);
NCNN_EXPORT void* operator new[](size_t size, void* ptr);
// deallocation functions
NCNN_EXPORT void operator delete(void* ptr);
NCNN_EXPORT void operator delete[](void* ptr);
// deallocation functions since c++14
#if __cplusplus >= 201402L
NCNN_EXPORT void operator delete(void* ptr, size_t sz);
NCNN_EXPORT void operator delete[](void* ptr, size_t sz);
#endif
// placement deallocation functions
NCNN_EXPORT void operator delete(void* ptr, void* voidptr2);
NCNN_EXPORT void operator delete[](void* ptr, void* voidptr2);
#endif
// minimal stl data structure implementation
namespace std {
template<typename T>
const T& max(const T& a, const T& b)
{
return (a < b) ? b : a;
}
template<typename T>
const T& min(const T& a, const T& b)
{
return (a > b) ? b : a;
}
template<typename T>
void swap(T& a, T& b)
{
T temp(a);
a = b;
b = temp;
}
template<typename T1, typename T2>
struct pair
{
pair()
: first(), second()
{
}
pair(const T1& t1, const T2& t2)
: first(t1), second(t2)
{
}
T1 first;
T2 second;
};
template<typename T1, typename T2>
bool operator==(const pair<T1, T2>& x, const pair<T1, T2>& y)
{
return (x.first == y.first && x.second == y.second);
}
template<typename T1, typename T2>
bool operator<(const pair<T1, T2>& x, const pair<T1, T2>& y)
{
return x.first < y.first || (!(y.first < x.first) && x.second < y.second);
}
template<typename T1, typename T2>
bool operator!=(const pair<T1, T2>& x, const pair<T1, T2>& y)
{
return !(x == y);
}
template<typename T1, typename T2>
bool operator>(const pair<T1, T2>& x, const pair<T1, T2>& y)
{
return y < x;
}
template<typename T1, typename T2>
bool operator<=(const pair<T1, T2>& x, const pair<T1, T2>& y)
{
return !(y < x);
}
template<typename T1, typename T2>
bool operator>=(const pair<T1, T2>& x, const pair<T1, T2>& y)
{
return !(x < y);
}
template<typename T1, typename T2>
pair<T1, T2> make_pair(const T1& t1, const T2& t2)
{
return pair<T1, T2>(t1, t2);
}
template<typename T>
struct node
{
node* prev_;
node* next_;
T data_;
node()
: prev_(0), next_(0), data_()
{
}
node(const T& t)
: prev_(0), next_(0), data_(t)
{
}
};
template<typename T>
struct iter_list
{
iter_list()
: curr_(0)
{
}
iter_list(node<T>* n)
: curr_(n)
{
}
iter_list(const iter_list& i)
: curr_(i.curr_)
{
}
~iter_list()
{
}
iter_list& operator=(const iter_list& i)
{
curr_ = i.curr_;
return *this;
}
T& operator*()
{
return curr_->data_;
}
T* operator->()
{
return &(curr_->data_);
}
bool operator==(const iter_list& i)
{
return curr_ == i.curr_;
}
bool operator!=(const iter_list& i)
{
return curr_ != i.curr_;
}
iter_list& operator++()
{
curr_ = curr_->next_;
return *this;
}
iter_list& operator--()
{
curr_ = curr_->prev_;
return *this;
}
node<T>* curr_;
};
template<typename T>
struct list
{
typedef iter_list<T> iterator;
list()
{
head_ = new node<T>();
tail_ = head_;
count_ = 0;
}
~list()
{
clear();
delete head_;
}
list(const list& l)
{
head_ = new node<T>();
tail_ = head_;
count_ = 0;
for (iter_list<T> i = l.begin(); i != l.end(); ++i)
{
push_back(*i);
}
}
list& operator=(const list& l)
{
if (this == &l)
{
return *this;
}
clear();
for (iter_list<T> i = l.begin(); i != l.end(); ++i)
{
push_back(*i);
}
return *this;
}
void clear()
{
while (count_ > 0)
{
pop_front();
}
}
void pop_front()
{
if (count_ > 0)
{
head_ = head_->next_;
delete head_->prev_;
head_->prev_ = 0;
--count_;
}
}
size_t size() const
{
return count_;
}
iter_list<T> begin() const
{
return iter_list<T>(head_);
}
iter_list<T> end() const
{
return iter_list<T>(tail_);
}
bool empty() const
{
return count_ == 0;
}
void push_back(const T& t)
{
if (count_ == 0)
{
head_ = new node<T>(t);
head_->prev_ = 0;
head_->next_ = tail_;
tail_->prev_ = head_;
count_ = 1;
}
else
{
node<T>* temp = new node<T>(t);
temp->prev_ = tail_->prev_;
temp->next_ = tail_;
tail_->prev_->next_ = temp;
tail_->prev_ = temp;
++count_;
}
}
iter_list<T> erase(iter_list<T> pos)
{
if (pos != end())
{
node<T>* temp = pos.curr_;
if (temp == head_)
{
++pos;
temp->next_->prev_ = 0;
head_ = temp->next_;
}
else
{
--pos;
temp->next_->prev_ = temp->prev_;
temp->prev_->next_ = temp->next_;
++pos;
}
delete temp;
--count_;
}
return pos;
}
protected:
node<T>* head_;
node<T>* tail_;
size_t count_;
};
template<typename T>
struct greater
{
bool operator()(const T& x, const T& y) const
{
return (x > y);
}
};
template<typename T>
struct less
{
bool operator()(const T& x, const T& y) const
{
return (x < y);
}
};
template<typename RandomAccessIter, typename Compare>
void partial_sort(RandomAccessIter first, RandomAccessIter middle, RandomAccessIter last, Compare comp)
{
// [TODO] heap sort should be used here, but we simply use bubble sort now
for (RandomAccessIter i = first; i < middle; ++i)
{
// bubble sort
for (RandomAccessIter j = last - 1; j > first; --j)
{
if (comp(*j, *(j - 1)))
{
swap(*j, *(j - 1));
}
}
}
}
template<typename T>
struct vector
{
vector()
: data_(0), size_(0), capacity_(0)
{
}
vector(const size_t new_size, const T& value = T())
: data_(0), size_(0), capacity_(0)
{
resize(new_size, value);
}
~vector()
{
clear();
}
vector(const vector& v)
: data_(0), size_(0), capacity_(0)
{
resize(v.size());
for (size_t i = 0; i < size_; i++)
{
data_[i] = v.data_[i];
}
}
vector& operator=(const vector& v)
{
if (this == &v)
{
return *this;
}
resize(0);
resize(v.size());
for (size_t i = 0; i < size_; i++)
{
data_[i] = v.data_[i];
}
return *this;
}
void resize(const size_t new_size, const T& value = T())
{
try_alloc(new_size);
if (new_size > size_)
{
for (size_t i = size_; i < new_size; i++)
{
new (&data_[i]) T(value);
}
}
else if (new_size < size_)
{
for (size_t i = new_size; i < size_; i++)
{
data_[i].~T();
}
}
size_ = new_size;
}
void clear()
{
for (size_t i = 0; i < size_; i++)
{
data_[i].~T();
}
delete[](char*) data_;
data_ = 0;
size_ = 0;
capacity_ = 0;
}
T* data() const
{
return data_;
}
size_t size() const
{
return size_;
}
T& operator[](size_t i) const
{
return data_[i];
}
T* begin() const
{
return &data_[0];
}
T* end() const
{
return &data_[size_];
}
bool empty() const
{
return size_ == 0;
}
void push_back(const T& t)
{
try_alloc(size_ + 1);
new (&data_[size_]) T(t);
size_++;
}
void insert(T* pos, T* b, T* e)
{
vector* v = 0;
if (b >= begin() && b < end())
{
// the source range comes from this same vector, so copy it first
v = new vector(*this);
b = v->begin() + (b - begin());
e = v->begin() + (e - begin());
}
size_t diff = pos - begin();
try_alloc(size_ + (e - b));
pos = begin() + diff;
memmove(pos + (e - b), pos, (end() - pos) * sizeof(T));
size_t len = e - b;
size_ += len;
for (size_t i = 0; i < len; i++)
{
*pos = *b;
pos++;
b++;
}
delete v;
}
T* erase(T* pos)
{
pos->~T();
memmove(pos, pos + 1, (end() - pos - 1) * sizeof(T));
size_--;
return pos;
}
protected:
T* data_;
size_t size_;
size_t capacity_;
void try_alloc(size_t new_size)
{
if (new_size * 3 / 2 > capacity_ / 2)
{
capacity_ = new_size * 2;
T* new_data = (T*)new char[capacity_ * sizeof(T)];
memset(new_data, 0, capacity_ * sizeof(T));
if (data_)
{
memmove(new_data, data_, sizeof(T) * size_);
delete[](char*) data_;
}
data_ = new_data;
}
}
};
struct NCNN_EXPORT string : public vector<char>
{
string()
{
}
string(const char* str)
{
size_t len = strlen(str);
resize(len);
memcpy(data_, str, len);
}
const char* c_str() const
{
return (const char*)data_;
}
bool operator==(const string& str2) const
{
return strcmp(data_, str2.data_) == 0;
}
bool operator==(const char* str2) const
{
return strcmp(data_, str2) == 0;
}
bool operator!=(const char* str2) const
{
return strcmp(data_, str2) != 0;
}
string& operator+=(const string& str1)
{
insert(end(), str1.begin(), str1.end());
return *this;
}
};
inline string operator+(const string& str1, const string& str2)
{
string str(str1);
str.insert(str.end(), str2.begin(), str2.end());
return str;
}
} // namespace std
#endif // NCNN_SIMPLESTL_H
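When NCNN_SIMPLESTL is enabled, platform.h pulls this header in instead of the real STL and the std::vector/std::string defined here stand in for the standard ones, so downstream code keeps its usual shape. A tiny sketch that compiles against either:

#include "platform.h" // brings in simplestl.h in an NCNN_SIMPLESTL build

static void simplestl_demo()
{
    std::vector<int> ids;
    ids.push_back(7);            // grows the backing buffer on demand (try_alloc() in the simplestl case)

    std::string tag("blob");
    tag += std::string("_0");    // concatenation via operator+= / insert()
}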

251
lib_ncnn/ncnn/vulkan_header_fix.h Executable file

@ -0,0 +1,251 @@
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_VULKAN_HEADER_FIX_H
#define NCNN_VULKAN_HEADER_FIX_H
#include <vulkan/vulkan.h>
// This header contains new structure and function declarations to fix the build with an old Vulkan SDK
#if VK_HEADER_VERSION < 70
#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES (VkStructureType)1000094000
typedef enum VkSubgroupFeatureFlagBits
{
VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001,
VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002,
VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004,
VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008,
VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010,
VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020,
VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040,
VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080,
VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100,
VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkSubgroupFeatureFlagBits;
typedef VkFlags VkSubgroupFeatureFlags;
typedef struct VkPhysicalDeviceSubgroupProperties
{
VkStructureType sType;
void* pNext;
uint32_t subgroupSize;
VkShaderStageFlags supportedStages;
VkSubgroupFeatureFlags supportedOperations;
VkBool32 quadOperationsInAllStages;
} VkPhysicalDeviceSubgroupProperties;
#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES (VkStructureType)1000168000
#define VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT (VkStructureType)1000168001
typedef struct VkPhysicalDeviceMaintenance3Properties
{
VkStructureType sType;
void* pNext;
uint32_t maxPerSetDescriptors;
VkDeviceSize maxMemoryAllocationSize;
} VkPhysicalDeviceMaintenance3Properties;
typedef struct VkDescriptorSetLayoutSupport
{
VkStructureType sType;
void* pNext;
VkBool32 supported;
} VkDescriptorSetLayoutSupport;
typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR;
typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR;
typedef void(VKAPI_PTR* PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport);
#endif // VK_HEADER_VERSION < 70
#if VK_HEADER_VERSION < 80
#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR (VkStructureType)1000177000
typedef struct VkPhysicalDevice8BitStorageFeaturesKHR
{
VkStructureType sType;
void* pNext;
VkBool32 storageBuffer8BitAccess;
VkBool32 uniformAndStorageBuffer8BitAccess;
VkBool32 storagePushConstant8;
} VkPhysicalDevice8BitStorageFeaturesKHR;
#define VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR (VkStructureType)1000109000
#define VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR (VkStructureType)1000109001
#define VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR (VkStructureType)1000109002
#define VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR (VkStructureType)1000109003
#define VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR (VkStructureType)1000109004
#define VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR (VkStructureType)1000109005
#define VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR (VkStructureType)1000109006
typedef struct VkAttachmentDescription2KHR
{
VkStructureType sType;
const void* pNext;
VkAttachmentDescriptionFlags flags;
VkFormat format;
VkSampleCountFlagBits samples;
VkAttachmentLoadOp loadOp;
VkAttachmentStoreOp storeOp;
VkAttachmentLoadOp stencilLoadOp;
VkAttachmentStoreOp stencilStoreOp;
VkImageLayout initialLayout;
VkImageLayout finalLayout;
} VkAttachmentDescription2KHR;
typedef struct VkAttachmentReference2KHR
{
VkStructureType sType;
const void* pNext;
uint32_t attachment;
VkImageLayout layout;
VkImageAspectFlags aspectMask;
} VkAttachmentReference2KHR;
typedef struct VkSubpassDescription2KHR
{
VkStructureType sType;
const void* pNext;
VkSubpassDescriptionFlags flags;
VkPipelineBindPoint pipelineBindPoint;
uint32_t viewMask;
uint32_t inputAttachmentCount;
const VkAttachmentReference2KHR* pInputAttachments;
uint32_t colorAttachmentCount;
const VkAttachmentReference2KHR* pColorAttachments;
const VkAttachmentReference2KHR* pResolveAttachments;
const VkAttachmentReference2KHR* pDepthStencilAttachment;
uint32_t preserveAttachmentCount;
const uint32_t* pPreserveAttachments;
} VkSubpassDescription2KHR;
typedef struct VkSubpassDependency2KHR
{
VkStructureType sType;
const void* pNext;
uint32_t srcSubpass;
uint32_t dstSubpass;
VkPipelineStageFlags srcStageMask;
VkPipelineStageFlags dstStageMask;
VkAccessFlags srcAccessMask;
VkAccessFlags dstAccessMask;
VkDependencyFlags dependencyFlags;
int32_t viewOffset;
} VkSubpassDependency2KHR;
typedef struct VkRenderPassCreateInfo2KHR
{
VkStructureType sType;
const void* pNext;
VkRenderPassCreateFlags flags;
uint32_t attachmentCount;
const VkAttachmentDescription2KHR* pAttachments;
uint32_t subpassCount;
const VkSubpassDescription2KHR* pSubpasses;
uint32_t dependencyCount;
const VkSubpassDependency2KHR* pDependencies;
uint32_t correlatedViewMaskCount;
const uint32_t* pCorrelatedViewMasks;
} VkRenderPassCreateInfo2KHR;
typedef struct VkSubpassBeginInfoKHR
{
VkStructureType sType;
const void* pNext;
VkSubpassContents contents;
} VkSubpassBeginInfoKHR;
typedef struct VkSubpassEndInfoKHR
{
VkStructureType sType;
const void* pNext;
} VkSubpassEndInfoKHR;
typedef VkResult(VKAPI_PTR* PFN_vkCreateRenderPass2KHR)(VkDevice device, const VkRenderPassCreateInfo2KHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
typedef void(VKAPI_PTR* PFN_vkCmdBeginRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfoKHR* pSubpassBeginInfo);
typedef void(VKAPI_PTR* PFN_vkCmdNextSubpass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR* pSubpassBeginInfo, const VkSubpassEndInfoKHR* pSubpassEndInfo);
typedef void(VKAPI_PTR* PFN_vkCmdEndRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR* pSubpassEndInfo);
#endif // VK_HEADER_VERSION < 80
#if VK_HEADER_VERSION < 95
#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR (VkStructureType)1000082000
typedef struct VkPhysicalDeviceFloat16Int8FeaturesKHR
{
VkStructureType sType;
void* pNext;
VkBool32 shaderFloat16;
VkBool32 shaderInt8;
} VkPhysicalDeviceFloat16Int8FeaturesKHR;
#endif // VK_HEADER_VERSION < 95
#if VK_HEADER_VERSION < 97
#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT (VkStructureType)1000237000
typedef struct VkPhysicalDeviceMemoryBudgetPropertiesEXT
{
VkStructureType sType;
void* pNext;
VkDeviceSize heapBudget[VK_MAX_MEMORY_HEAPS];
VkDeviceSize heapUsage[VK_MAX_MEMORY_HEAPS];
} VkPhysicalDeviceMemoryBudgetPropertiesEXT;
#endif // VK_HEADER_VERSION < 97
#if VK_HEADER_VERSION < 101
#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV (VkStructureType)1000249000
#define VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV (VkStructureType)1000249001
#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV (VkStructureType)1000249002
typedef enum VkComponentTypeNV
{
VK_COMPONENT_TYPE_FLOAT16_NV = 0,
VK_COMPONENT_TYPE_FLOAT32_NV = 1,
VK_COMPONENT_TYPE_FLOAT64_NV = 2,
VK_COMPONENT_TYPE_SINT8_NV = 3,
VK_COMPONENT_TYPE_SINT16_NV = 4,
VK_COMPONENT_TYPE_SINT32_NV = 5,
VK_COMPONENT_TYPE_SINT64_NV = 6,
VK_COMPONENT_TYPE_UINT8_NV = 7,
VK_COMPONENT_TYPE_UINT16_NV = 8,
VK_COMPONENT_TYPE_UINT32_NV = 9,
VK_COMPONENT_TYPE_UINT64_NV = 10,
VK_COMPONENT_TYPE_BEGIN_RANGE_NV = VK_COMPONENT_TYPE_FLOAT16_NV,
VK_COMPONENT_TYPE_END_RANGE_NV = VK_COMPONENT_TYPE_UINT64_NV,
VK_COMPONENT_TYPE_RANGE_SIZE_NV = (VK_COMPONENT_TYPE_UINT64_NV - VK_COMPONENT_TYPE_FLOAT16_NV + 1),
VK_COMPONENT_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
} VkComponentTypeNV;
typedef enum VkScopeNV
{
VK_SCOPE_DEVICE_NV = 1,
VK_SCOPE_WORKGROUP_NV = 2,
VK_SCOPE_SUBGROUP_NV = 3,
VK_SCOPE_QUEUE_FAMILY_NV = 5,
VK_SCOPE_BEGIN_RANGE_NV = VK_SCOPE_DEVICE_NV,
VK_SCOPE_END_RANGE_NV = VK_SCOPE_QUEUE_FAMILY_NV,
VK_SCOPE_RANGE_SIZE_NV = (VK_SCOPE_QUEUE_FAMILY_NV - VK_SCOPE_DEVICE_NV + 1),
VK_SCOPE_MAX_ENUM_NV = 0x7FFFFFFF
} VkScopeNV;
typedef struct VkCooperativeMatrixPropertiesNV
{
VkStructureType sType;
void* pNext;
uint32_t MSize;
uint32_t NSize;
uint32_t KSize;
VkComponentTypeNV AType;
VkComponentTypeNV BType;
VkComponentTypeNV CType;
VkComponentTypeNV DType;
VkScopeNV scope;
} VkCooperativeMatrixPropertiesNV;
typedef struct VkPhysicalDeviceCooperativeMatrixFeaturesNV
{
VkStructureType sType;
void* pNext;
VkBool32 cooperativeMatrix;
VkBool32 cooperativeMatrixRobustBufferAccess;
} VkPhysicalDeviceCooperativeMatrixFeaturesNV;
typedef struct VkPhysicalDeviceCooperativeMatrixPropertiesNV
{
VkStructureType sType;
void* pNext;
VkShaderStageFlags cooperativeMatrixSupportedStages;
} VkPhysicalDeviceCooperativeMatrixPropertiesNV;
typedef VkResult(VKAPI_PTR* PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixPropertiesNV* pProperties);
#endif // VK_HEADER_VERSION < 101
#endif // NCNN_VULKAN_HEADER_FIX_H


@ -0,0 +1,590 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _BASICTYPES_INCLUDED_
#define _BASICTYPES_INCLUDED_
namespace glslang {
//
// Basic type. Arrays, vectors, sampler details, etc., are orthogonal to this.
//
enum TBasicType {
EbtVoid,
EbtFloat,
EbtDouble,
EbtFloat16,
EbtInt8,
EbtUint8,
EbtInt16,
EbtUint16,
EbtInt,
EbtUint,
EbtInt64,
EbtUint64,
EbtBool,
EbtAtomicUint,
EbtSampler,
EbtStruct,
EbtBlock,
EbtAccStruct,
EbtReference,
EbtRayQuery,
#ifndef GLSLANG_WEB
// SPIR-V type defined by spirv_type
EbtSpirvType,
#endif
// HLSL types that live only temporarily.
EbtString,
EbtNumTypes
};
//
// Storage qualifiers. Should align with different kinds of storage or
// resource or GLSL storage qualifier. Expansion is deprecated.
//
// N.B.: You probably DON'T want to add anything here, but rather just add it
// to the built-in variables. See the comment above TBuiltInVariable.
//
// A new built-in variable will normally be an existing qualifier, like 'in', 'out', etc.
// DO NOT follow the design pattern of, say EvqInstanceId, etc.
//
enum TStorageQualifier {
EvqTemporary, // For temporaries (within a function), read/write
EvqGlobal, // For globals read/write
EvqConst, // User-defined constant values, will be semantically constant and constant folded
EvqVaryingIn, // pipeline input, read only, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
EvqVaryingOut, // pipeline output, read/write, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
EvqUniform, // read only, shared with app
EvqBuffer, // read/write, shared with app
EvqShared, // compute shader's read/write 'shared' qualifier
#ifndef GLSLANG_WEB
EvqSpirvStorageClass, // spirv_storage_class
#endif
EvqPayload,
EvqPayloadIn,
EvqHitAttr,
EvqCallableData,
EvqCallableDataIn,
// parameters
EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
EvqOut, // also, for 'out' in the grammar before we know if it's a pipeline output or an 'out' parameter
EvqInOut,
EvqConstReadOnly, // input; also other read-only types having neither a constant value nor constant-value semantics
// built-ins read by vertex shader
EvqVertexId,
EvqInstanceId,
// built-ins written by vertex shader
EvqPosition,
EvqPointSize,
EvqClipVertex,
// built-ins read by fragment shader
EvqFace,
EvqFragCoord,
EvqPointCoord,
// built-ins written by fragment shader
EvqFragColor,
EvqFragDepth,
// end of list
EvqLast
};
//
// Subcategories of the TStorageQualifier, simply to give a direct mapping
// between built-in variable names and an numerical value (the enum).
//
// For backward compatibility, there is some redundancy between the
// TStorageQualifier and these. Existing members should both be maintained accurately.
// However, any new built-in variable (and any existing non-redundant one)
// must follow the pattern that the specific built-in is here, and only its
// general qualifier is in TStorageQualifier.
//
// Something like gl_Position, which is sometimes 'in' and sometimes 'out'
// shows up as two different built-in variables in a single stage, but
// only has a single enum in TBuiltInVariable, so both the
// TStorageQualifier and the TBuiltInVariable are needed to distinguish
// between them.
//
enum TBuiltInVariable {
EbvNone,
EbvNumWorkGroups,
EbvWorkGroupSize,
EbvWorkGroupId,
EbvLocalInvocationId,
EbvGlobalInvocationId,
EbvLocalInvocationIndex,
EbvNumSubgroups,
EbvSubgroupID,
EbvSubGroupSize,
EbvSubGroupInvocation,
EbvSubGroupEqMask,
EbvSubGroupGeMask,
EbvSubGroupGtMask,
EbvSubGroupLeMask,
EbvSubGroupLtMask,
EbvSubgroupSize2,
EbvSubgroupInvocation2,
EbvSubgroupEqMask2,
EbvSubgroupGeMask2,
EbvSubgroupGtMask2,
EbvSubgroupLeMask2,
EbvSubgroupLtMask2,
EbvVertexId,
EbvInstanceId,
EbvVertexIndex,
EbvInstanceIndex,
EbvBaseVertex,
EbvBaseInstance,
EbvDrawId,
EbvPosition,
EbvPointSize,
EbvClipVertex,
EbvClipDistance,
EbvCullDistance,
EbvNormal,
EbvVertex,
EbvMultiTexCoord0,
EbvMultiTexCoord1,
EbvMultiTexCoord2,
EbvMultiTexCoord3,
EbvMultiTexCoord4,
EbvMultiTexCoord5,
EbvMultiTexCoord6,
EbvMultiTexCoord7,
EbvFrontColor,
EbvBackColor,
EbvFrontSecondaryColor,
EbvBackSecondaryColor,
EbvTexCoord,
EbvFogFragCoord,
EbvInvocationId,
EbvPrimitiveId,
EbvLayer,
EbvViewportIndex,
EbvPatchVertices,
EbvTessLevelOuter,
EbvTessLevelInner,
EbvBoundingBox,
EbvTessCoord,
EbvColor,
EbvSecondaryColor,
EbvFace,
EbvFragCoord,
EbvPointCoord,
EbvFragColor,
EbvFragData,
EbvFragDepth,
EbvFragStencilRef,
EbvSampleId,
EbvSamplePosition,
EbvSampleMask,
EbvHelperInvocation,
EbvBaryCoordNoPersp,
EbvBaryCoordNoPerspCentroid,
EbvBaryCoordNoPerspSample,
EbvBaryCoordSmooth,
EbvBaryCoordSmoothCentroid,
EbvBaryCoordSmoothSample,
EbvBaryCoordPullModel,
EbvViewIndex,
EbvDeviceIndex,
EbvShadingRateKHR,
EbvPrimitiveShadingRateKHR,
EbvFragSizeEXT,
EbvFragInvocationCountEXT,
EbvSecondaryFragDataEXT,
EbvSecondaryFragColorEXT,
EbvViewportMaskNV,
EbvSecondaryPositionNV,
EbvSecondaryViewportMaskNV,
EbvPositionPerViewNV,
EbvViewportMaskPerViewNV,
EbvFragFullyCoveredNV,
EbvFragmentSizeNV,
EbvInvocationsPerPixelNV,
// ray tracing
EbvLaunchId,
EbvLaunchSize,
EbvInstanceCustomIndex,
EbvGeometryIndex,
EbvWorldRayOrigin,
EbvWorldRayDirection,
EbvObjectRayOrigin,
EbvObjectRayDirection,
EbvRayTmin,
EbvRayTmax,
EbvCullMask,
EbvHitT,
EbvHitKind,
EbvObjectToWorld,
EbvObjectToWorld3x4,
EbvWorldToObject,
EbvWorldToObject3x4,
EbvIncomingRayFlags,
EbvCurrentRayTimeNV,
// barycentrics
EbvBaryCoordNV,
EbvBaryCoordNoPerspNV,
// mesh shaders
EbvTaskCountNV,
EbvPrimitiveCountNV,
EbvPrimitiveIndicesNV,
EbvClipDistancePerViewNV,
EbvCullDistancePerViewNV,
EbvLayerPerViewNV,
EbvMeshViewCountNV,
EbvMeshViewIndicesNV,
// sm builtins
EbvWarpsPerSM,
EbvSMCount,
EbvWarpID,
EbvSMID,
// HLSL built-ins that live only temporarily, until they get remapped
// to one of the above.
EbvFragDepthGreater,
EbvFragDepthLesser,
EbvGsOutputStream,
EbvOutputPatch,
EbvInputPatch,
// structbuffer types
EbvAppendConsume, // no need to differentiate append and consume
EbvRWStructuredBuffer,
EbvStructuredBuffer,
EbvByteAddressBuffer,
EbvRWByteAddressBuffer,
EbvLast
};
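A minimal sketch of why both enums are needed, using a hypothetical SymbolKind record: gl_Position in a single stage can appear both as a pipeline input and as a pipeline output, and only the storage qualifier tells the two apart.
struct SymbolKind {                        // hypothetical pairing, for illustration only
    TStorageQualifier storage;             // general class: in, out, uniform, ...
    TBuiltInVariable  builtIn;             // specific built-in, or EbvNone
};
// gl_Position read as a per-vertex input and written as a per-vertex output
// in the same stage: same TBuiltInVariable, different TStorageQualifier.
const SymbolKind positionIn  = { EvqVaryingIn,  EbvPosition };
const SymbolKind positionOut = { EvqVaryingOut, EbvPosition };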
// In this enum, order matters; users can assume higher precision is a bigger value
// and EpqNone is 0.
enum TPrecisionQualifier {
EpqNone = 0,
EpqLow,
EpqMedium,
EpqHigh
};
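Because the enumerators are ordered, merging two precision qualifiers can rely on a plain comparison; a small sketch (the names a, b, and promoted are illustrative):
const TPrecisionQualifier a = EpqMedium, b = EpqHigh;
const TPrecisionQualifier promoted = b > a ? b : a;   // EpqHigh
const bool hasExplicitPrecision = (a != EpqNone);     // true, since EpqNone is 0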
#ifdef GLSLANG_WEB
__inline const char* GetStorageQualifierString(TStorageQualifier q) { return ""; }
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p) { return ""; }
#else
// These will show up in error messages
__inline const char* GetStorageQualifierString(TStorageQualifier q)
{
switch (q) {
case EvqTemporary: return "temp"; break;
case EvqGlobal: return "global"; break;
case EvqConst: return "const"; break;
case EvqConstReadOnly: return "const (read only)"; break;
#ifndef GLSLANG_WEB
case EvqSpirvStorageClass: return "spirv_storage_class"; break;
#endif
case EvqVaryingIn: return "in"; break;
case EvqVaryingOut: return "out"; break;
case EvqUniform: return "uniform"; break;
case EvqBuffer: return "buffer"; break;
case EvqShared: return "shared"; break;
case EvqIn: return "in"; break;
case EvqOut: return "out"; break;
case EvqInOut: return "inout"; break;
case EvqVertexId: return "gl_VertexId"; break;
case EvqInstanceId: return "gl_InstanceId"; break;
case EvqPosition: return "gl_Position"; break;
case EvqPointSize: return "gl_PointSize"; break;
case EvqClipVertex: return "gl_ClipVertex"; break;
case EvqFace: return "gl_FrontFacing"; break;
case EvqFragCoord: return "gl_FragCoord"; break;
case EvqPointCoord: return "gl_PointCoord"; break;
case EvqFragColor: return "fragColor"; break;
case EvqFragDepth: return "gl_FragDepth"; break;
case EvqPayload: return "rayPayloadNV"; break;
case EvqPayloadIn: return "rayPayloadInNV"; break;
case EvqHitAttr: return "hitAttributeNV"; break;
case EvqCallableData: return "callableDataNV"; break;
case EvqCallableDataIn: return "callableDataInNV"; break;
default: return "unknown qualifier";
}
}
__inline const char* GetBuiltInVariableString(TBuiltInVariable v)
{
switch (v) {
case EbvNone: return "";
case EbvNumWorkGroups: return "NumWorkGroups";
case EbvWorkGroupSize: return "WorkGroupSize";
case EbvWorkGroupId: return "WorkGroupID";
case EbvLocalInvocationId: return "LocalInvocationID";
case EbvGlobalInvocationId: return "GlobalInvocationID";
case EbvLocalInvocationIndex: return "LocalInvocationIndex";
case EbvNumSubgroups: return "NumSubgroups";
case EbvSubgroupID: return "SubgroupID";
case EbvSubGroupSize: return "SubGroupSize";
case EbvSubGroupInvocation: return "SubGroupInvocation";
case EbvSubGroupEqMask: return "SubGroupEqMask";
case EbvSubGroupGeMask: return "SubGroupGeMask";
case EbvSubGroupGtMask: return "SubGroupGtMask";
case EbvSubGroupLeMask: return "SubGroupLeMask";
case EbvSubGroupLtMask: return "SubGroupLtMask";
case EbvSubgroupSize2: return "SubgroupSize";
case EbvSubgroupInvocation2: return "SubgroupInvocationID";
case EbvSubgroupEqMask2: return "SubgroupEqMask";
case EbvSubgroupGeMask2: return "SubgroupGeMask";
case EbvSubgroupGtMask2: return "SubgroupGtMask";
case EbvSubgroupLeMask2: return "SubgroupLeMask";
case EbvSubgroupLtMask2: return "SubgroupLtMask";
case EbvVertexId: return "VertexId";
case EbvInstanceId: return "InstanceId";
case EbvVertexIndex: return "VertexIndex";
case EbvInstanceIndex: return "InstanceIndex";
case EbvBaseVertex: return "BaseVertex";
case EbvBaseInstance: return "BaseInstance";
case EbvDrawId: return "DrawId";
case EbvPosition: return "Position";
case EbvPointSize: return "PointSize";
case EbvClipVertex: return "ClipVertex";
case EbvClipDistance: return "ClipDistance";
case EbvCullDistance: return "CullDistance";
case EbvNormal: return "Normal";
case EbvVertex: return "Vertex";
case EbvMultiTexCoord0: return "MultiTexCoord0";
case EbvMultiTexCoord1: return "MultiTexCoord1";
case EbvMultiTexCoord2: return "MultiTexCoord2";
case EbvMultiTexCoord3: return "MultiTexCoord3";
case EbvMultiTexCoord4: return "MultiTexCoord4";
case EbvMultiTexCoord5: return "MultiTexCoord5";
case EbvMultiTexCoord6: return "MultiTexCoord6";
case EbvMultiTexCoord7: return "MultiTexCoord7";
case EbvFrontColor: return "FrontColor";
case EbvBackColor: return "BackColor";
case EbvFrontSecondaryColor: return "FrontSecondaryColor";
case EbvBackSecondaryColor: return "BackSecondaryColor";
case EbvTexCoord: return "TexCoord";
case EbvFogFragCoord: return "FogFragCoord";
case EbvInvocationId: return "InvocationID";
case EbvPrimitiveId: return "PrimitiveID";
case EbvLayer: return "Layer";
case EbvViewportIndex: return "ViewportIndex";
case EbvPatchVertices: return "PatchVertices";
case EbvTessLevelOuter: return "TessLevelOuter";
case EbvTessLevelInner: return "TessLevelInner";
case EbvBoundingBox: return "BoundingBox";
case EbvTessCoord: return "TessCoord";
case EbvColor: return "Color";
case EbvSecondaryColor: return "SecondaryColor";
case EbvFace: return "Face";
case EbvFragCoord: return "FragCoord";
case EbvPointCoord: return "PointCoord";
case EbvFragColor: return "FragColor";
case EbvFragData: return "FragData";
case EbvFragDepth: return "FragDepth";
case EbvFragStencilRef: return "FragStencilRef";
case EbvSampleId: return "SampleId";
case EbvSamplePosition: return "SamplePosition";
case EbvSampleMask: return "SampleMaskIn";
case EbvHelperInvocation: return "HelperInvocation";
case EbvBaryCoordNoPersp: return "BaryCoordNoPersp";
case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid";
case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample";
case EbvBaryCoordSmooth: return "BaryCoordSmooth";
case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid";
case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample";
case EbvBaryCoordPullModel: return "BaryCoordPullModel";
case EbvViewIndex: return "ViewIndex";
case EbvDeviceIndex: return "DeviceIndex";
case EbvFragSizeEXT: return "FragSizeEXT";
case EbvFragInvocationCountEXT: return "FragInvocationCountEXT";
case EbvSecondaryFragDataEXT: return "SecondaryFragDataEXT";
case EbvSecondaryFragColorEXT: return "SecondaryFragColorEXT";
case EbvViewportMaskNV: return "ViewportMaskNV";
case EbvSecondaryPositionNV: return "SecondaryPositionNV";
case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
case EbvPositionPerViewNV: return "PositionPerViewNV";
case EbvViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
case EbvFragFullyCoveredNV: return "FragFullyCoveredNV";
case EbvFragmentSizeNV: return "FragmentSizeNV";
case EbvInvocationsPerPixelNV: return "InvocationsPerPixelNV";
case EbvLaunchId: return "LaunchIdNV";
case EbvLaunchSize: return "LaunchSizeNV";
case EbvInstanceCustomIndex: return "InstanceCustomIndexNV";
case EbvGeometryIndex: return "GeometryIndexEXT";
case EbvWorldRayOrigin: return "WorldRayOriginNV";
case EbvWorldRayDirection: return "WorldRayDirectionNV";
case EbvObjectRayOrigin: return "ObjectRayOriginNV";
case EbvObjectRayDirection: return "ObjectRayDirectionNV";
case EbvRayTmin: return "ObjectRayTminNV";
case EbvRayTmax: return "ObjectRayTmaxNV";
case EbvHitT: return "HitTNV";
case EbvHitKind: return "HitKindNV";
case EbvIncomingRayFlags: return "IncomingRayFlagsNV";
case EbvObjectToWorld: return "ObjectToWorldNV";
case EbvWorldToObject: return "WorldToObjectNV";
case EbvCurrentRayTimeNV: return "CurrentRayTimeNV";
case EbvBaryCoordNV: return "BaryCoordNV";
case EbvBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
case EbvTaskCountNV: return "TaskCountNV";
case EbvPrimitiveCountNV: return "PrimitiveCountNV";
case EbvPrimitiveIndicesNV: return "PrimitiveIndicesNV";
case EbvClipDistancePerViewNV: return "ClipDistancePerViewNV";
case EbvCullDistancePerViewNV: return "CullDistancePerViewNV";
case EbvLayerPerViewNV: return "LayerPerViewNV";
case EbvMeshViewCountNV: return "MeshViewCountNV";
case EbvMeshViewIndicesNV: return "MeshViewIndicesNV";
case EbvWarpsPerSM: return "WarpsPerSMNV";
case EbvSMCount: return "SMCountNV";
case EbvWarpID: return "WarpIDNV";
case EbvSMID: return "SMIDNV";
case EbvShadingRateKHR: return "ShadingRateKHR";
case EbvPrimitiveShadingRateKHR: return "PrimitiveShadingRateKHR";
default: return "unknown built-in variable";
}
}
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
{
switch (p) {
case EpqNone: return ""; break;
case EpqLow: return "lowp"; break;
case EpqMedium: return "mediump"; break;
case EpqHigh: return "highp"; break;
default: return "unknown precision qualifier";
}
}
#endif
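A sketch of how these strings might be stitched into a diagnostic; reportRedeclaration is a hypothetical helper and <cstdio> is assumed to be available to the caller:
// Illustrative only.
inline void reportRedeclaration(TStorageQualifier q, TBuiltInVariable v)
{
    // e.g. prints: cannot redeclare in variable Position
    printf("cannot redeclare %s variable %s\n",
           GetStorageQualifierString(q), GetBuiltInVariableString(v));
}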
__inline bool isTypeSignedInt(TBasicType type)
{
switch (type) {
case EbtInt8:
case EbtInt16:
case EbtInt:
case EbtInt64:
return true;
default:
return false;
}
}
__inline bool isTypeUnsignedInt(TBasicType type)
{
switch (type) {
case EbtUint8:
case EbtUint16:
case EbtUint:
case EbtUint64:
return true;
default:
return false;
}
}
__inline bool isTypeInt(TBasicType type)
{
return isTypeSignedInt(type) || isTypeUnsignedInt(type);
}
__inline bool isTypeFloat(TBasicType type)
{
switch (type) {
case EbtFloat:
case EbtDouble:
case EbtFloat16:
return true;
default:
return false;
}
}
__inline int getTypeRank(TBasicType type)
{
int res = -1;
switch(type) {
case EbtInt8:
case EbtUint8:
res = 0;
break;
case EbtInt16:
case EbtUint16:
res = 1;
break;
case EbtInt:
case EbtUint:
res = 2;
break;
case EbtInt64:
case EbtUint64:
res = 3;
break;
default:
assert(false);
break;
}
return res;
}
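The rank makes integer widening checks directional; a minimal sketch with a hypothetical helper name:
// An implicit integer conversion is allowed here only when it widens,
// i.e. when the destination rank is at least the source rank.
inline bool widensOrKeepsRank(TBasicType from, TBasicType to)
{
    if (!isTypeInt(from) || !isTypeInt(to))
        return false;                                // getTypeRank asserts on non-integers
    return getTypeRank(from) <= getTypeRank(to);     // EbtInt -> EbtInt64: true
}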
} // end namespace glslang
#endif // _BASICTYPES_INCLUDED_

View File

@ -0,0 +1,340 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _COMMON_INCLUDED_
#define _COMMON_INCLUDED_
#include <algorithm>
#include <cassert>
#ifdef _MSC_VER
#include <cfloat>
#else
#include <cmath>
#endif
#include <cstdio>
#include <cstdlib>
#include <list>
#include <map>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#if defined(__ANDROID__) || (defined(_MSC_VER) && _MSC_VER < 1700)
#include <sstream>
namespace std {
template<typename T>
std::string to_string(const T& val) {
std::ostringstream os;
os << val;
return os.str();
}
}
#endif
#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || MINGW_HAS_SECURE_API
#include <basetsd.h>
#ifndef snprintf
#define snprintf sprintf_s
#endif
#define safe_vsprintf(buf,max,format,args) vsnprintf_s((buf), (max), (max), (format), (args))
#elif defined (solaris)
#define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
#include <sys/int_types.h>
#define UINT_PTR uintptr_t
#else
#define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
#include <stdint.h>
#define UINT_PTR uintptr_t
#endif
#if defined(_MSC_VER) && _MSC_VER < 1800
#include <stdlib.h>
inline long long int strtoll (const char* str, char** endptr, int base)
{
return _strtoi64(str, endptr, base);
}
inline unsigned long long int strtoull (const char* str, char** endptr, int base)
{
return _strtoui64(str, endptr, base);
}
inline long long int atoll (const char* str)
{
return strtoll(str, NULL, 10);
}
#endif
#if defined(_MSC_VER)
#define strdup _strdup
#endif
/* windows only pragma */
#ifdef _MSC_VER
#pragma warning(disable : 4786) // Don't warn about too long identifiers
#pragma warning(disable : 4514) // unused inline method
#pragma warning(disable : 4201) // nameless union
#endif
#include "PoolAlloc.h"
//
// Put POOL_ALLOCATOR_NEW_DELETE in base classes to make them use this scheme.
//
#define POOL_ALLOCATOR_NEW_DELETE(A) \
void* operator new(size_t s) { return (A).allocate(s); } \
void* operator new(size_t, void *_Where) { return (_Where); } \
void operator delete(void*) { } \
void operator delete(void *, void *) { } \
void* operator new[](size_t s) { return (A).allocate(s); } \
void* operator new[](size_t, void *_Where) { return (_Where); } \
void operator delete[](void*) { } \
void operator delete[](void *, void *) { }
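A sketch of the intended use of the macro, with a hypothetical TNode class; it assumes a pool has already been pushed on the calling thread's allocator:
class TNode {
public:
    POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
    int value = 0;
};
// 'new TNode' now draws from the per-thread pool; 'delete' is a no-op, and the
// memory is reclaimed in bulk when that pool is later popped.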
namespace glslang {
//
// Pool version of string.
//
typedef pool_allocator<char> TStringAllocator;
typedef std::basic_string <char, std::char_traits<char>, TStringAllocator> TString;
} // end namespace glslang
// Repackage the std::hash for use by unordered map/set with a TString key.
namespace std {
template<> struct hash<glslang::TString> {
std::size_t operator()(const glslang::TString& s) const
{
const unsigned _FNV_offset_basis = 2166136261U;
const unsigned _FNV_prime = 16777619U;
unsigned _Val = _FNV_offset_basis;
size_t _Count = s.size();
const char* _First = s.c_str();
for (size_t _Next = 0; _Next < _Count; ++_Next)
{
_Val ^= (unsigned)_First[_Next];
_Val *= _FNV_prime;
}
return _Val;
}
};
}
namespace glslang {
inline TString* NewPoolTString(const char* s)
{
void* memory = GetThreadPoolAllocator().allocate(sizeof(TString));
return new(memory) TString(s);
}
template<class T> inline T* NewPoolObject(T*)
{
return new(GetThreadPoolAllocator().allocate(sizeof(T))) T;
}
template<class T> inline T* NewPoolObject(T, int instances)
{
return new(GetThreadPoolAllocator().allocate(instances * sizeof(T))) T[instances];
}
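A small sketch of pool-string creation, assuming a pool is active on the calling thread (makeKeyword is a hypothetical name):
// Illustrative only: the returned string lives in the active pool, so it is
// never deleted individually.
inline TString* makeKeyword() { return NewPoolTString("layout"); }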
//
// Pool allocator versions of vectors, lists, and maps
//
template <class T> class TVector : public std::vector<T, pool_allocator<T> > {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
typedef typename std::vector<T, pool_allocator<T> >::size_type size_type;
TVector() : std::vector<T, pool_allocator<T> >() {}
TVector(const pool_allocator<T>& a) : std::vector<T, pool_allocator<T> >(a) {}
TVector(size_type i) : std::vector<T, pool_allocator<T> >(i) {}
TVector(size_type i, const T& val) : std::vector<T, pool_allocator<T> >(i, val) {}
};
template <class T> class TList : public std::list<T, pool_allocator<T> > {
};
template <class K, class D, class CMP = std::less<K> >
class TMap : public std::map<K, D, CMP, pool_allocator<std::pair<K const, D> > > {
};
template <class K, class D, class HASH = std::hash<K>, class PRED = std::equal_to<K> >
class TUnorderedMap : public std::unordered_map<K, D, HASH, PRED, pool_allocator<std::pair<K const, D> > > {
};
template <class K, class CMP = std::less<K> >
class TSet : public std::set<K, CMP, pool_allocator<K> > {
};
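Together with the std::hash<TString> specialization above, these aliases let TString act as a map key directly; a brief sketch (containerSketch is a hypothetical name):
// Illustrative only; requires an active thread pool allocator.
inline void containerSketch()
{
    TVector<int> counts(4, 0);                 // four zero-initialized entries, pool-backed
    TUnorderedMap<TString, int> nameToIndex;   // keys hashed by the std::hash<TString> above
    nameToIndex[TString("gl_Position")] = 0;
}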
//
// Persistent string memory. Should only be used for strings that survive
// across compiles/links.
//
typedef std::basic_string<char> TPersistString;
//
// templatized min and max functions.
//
template <class T> T Min(const T a, const T b) { return a < b ? a : b; }
template <class T> T Max(const T a, const T b) { return a > b ? a : b; }
//
// Create a TString object from an integer.
//
#if defined _MSC_VER || MINGW_HAS_SECURE_API
inline const TString String(const int i, const int base = 10)
{
char text[16]; // 32 bit ints are at most 10 digits in base 10
_itoa_s(i, text, sizeof(text), base);
return text;
}
#else
inline const TString String(const int i, const int /*base*/ = 10)
{
char text[16]; // 32 bit ints are at most 10 digits in base 10
// we assume base 10 for all cases
snprintf(text, sizeof(text), "%d", i);
return text;
}
#endif
struct TSourceLoc {
void init()
{
name = nullptr; string = 0; line = 0; column = 0;
}
void init(int stringNum) { init(); string = stringNum; }
// Returns the name if it exists. Otherwise, returns the string number.
std::string getStringNameOrNum(bool quoteStringName = true) const
{
if (name != nullptr) {
TString qstr = quoteStringName ? ("\"" + *name + "\"") : *name;
std::string ret_str(qstr.c_str());
return ret_str;
}
return std::to_string((long long)string);
}
const char* getFilename() const
{
if (name == nullptr)
return nullptr;
return name->c_str();
}
const char* getFilenameStr() const { return name == nullptr ? "" : name->c_str(); }
TString* name; // descriptive name for this string, when a textual name is available, otherwise nullptr
int string;
int line;
int column;
};
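A sketch of filling in a location the way a scanner might, wrapped in a hypothetical helper:
// Illustrative only.
inline void locationSketch()
{
    TSourceLoc loc;
    loc.init(2);            // source string #2, no textual name yet
    loc.line = 14;
    // getStringNameOrNum() falls back to the string number here, yielding "2";
    // once loc.name is set it returns the (optionally quoted) file name instead.
}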
class TPragmaTable : public TMap<TString, TString> {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
};
const int MaxTokenLength = 1024;
template <class T> bool IsPow2(T powerOf2)
{
if (powerOf2 <= 0)
return false;
return (powerOf2 & (powerOf2 - 1)) == 0;
}
// Round number up to a multiple of the given powerOf2, which is not
// an exponent, just a value that must itself be a power of 2.
template <class T> void RoundToPow2(T& number, int powerOf2)
{
assert(IsPow2(powerOf2));
number = (number + powerOf2 - 1) & ~(powerOf2 - 1);
}
template <class T> bool IsMultipleOfPow2(T number, int powerOf2)
{
assert(IsPow2(powerOf2));
return ! (number & (powerOf2 - 1));
}
// Returns log2 of an integer power of 2.
// T should be integral.
template <class T> int IntLog2(T n)
{
assert(IsPow2(n));
int result = 0;
while ((T(1) << result) != n) {
result++;
}
return result;
}
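A short worked example of the power-of-two helpers (pow2Sketch is a hypothetical wrapper; the commented values follow directly from the definitions above):
inline void pow2Sketch()
{
    int offset = 37;
    RoundToPow2(offset, 16);                     // offset becomes 48
    bool aligned = IsMultipleOfPow2(offset, 16); // true
    int shift = IntLog2(64);                     // 6, since 1 << 6 == 64
    (void)aligned; (void)shift;
}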
inline bool IsInfinity(double x) {
#ifdef _MSC_VER
switch (_fpclass(x)) {
case _FPCLASS_NINF:
case _FPCLASS_PINF:
return true;
default:
return false;
}
#else
return std::isinf(x);
#endif
}
inline bool IsNan(double x) {
#ifdef _MSC_VER
switch (_fpclass(x)) {
case _FPCLASS_SNAN:
case _FPCLASS_QNAN:
return true;
default:
return false;
}
#else
return std::isnan(x);
#endif
}
} // end namespace glslang
#endif // _COMMON_INCLUDED_

View File

@ -0,0 +1,974 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _CONSTANT_UNION_INCLUDED_
#define _CONSTANT_UNION_INCLUDED_
#include "../Include/Common.h"
#include "../Include/BaseTypes.h"
namespace glslang {
class TConstUnion {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TConstUnion() : iConst(0), type(EbtInt) { }
void setI8Const(signed char i)
{
i8Const = i;
type = EbtInt8;
}
void setU8Const(unsigned char u)
{
u8Const = u;
type = EbtUint8;
}
void setI16Const(signed short i)
{
i16Const = i;
type = EbtInt16;
}
void setU16Const(unsigned short u)
{
u16Const = u;
type = EbtUint16;
}
void setIConst(int i)
{
iConst = i;
type = EbtInt;
}
void setUConst(unsigned int u)
{
uConst = u;
type = EbtUint;
}
void setI64Const(long long i64)
{
i64Const = i64;
type = EbtInt64;
}
void setU64Const(unsigned long long u64)
{
u64Const = u64;
type = EbtUint64;
}
void setDConst(double d)
{
dConst = d;
type = EbtDouble;
}
void setBConst(bool b)
{
bConst = b;
type = EbtBool;
}
void setSConst(const TString* s)
{
sConst = s;
type = EbtString;
}
signed char getI8Const() const { return i8Const; }
unsigned char getU8Const() const { return u8Const; }
signed short getI16Const() const { return i16Const; }
unsigned short getU16Const() const { return u16Const; }
int getIConst() const { return iConst; }
unsigned int getUConst() const { return uConst; }
long long getI64Const() const { return i64Const; }
unsigned long long getU64Const() const { return u64Const; }
double getDConst() const { return dConst; }
bool getBConst() const { return bConst; }
const TString* getSConst() const { return sConst; }
bool operator==(const signed char i) const
{
if (i == i8Const)
return true;
return false;
}
bool operator==(const unsigned char u) const
{
if (u == u8Const)
return true;
return false;
}
bool operator==(const signed short i) const
{
if (i == i16Const)
return true;
return false;
}
bool operator==(const unsigned short u) const
{
if (u == u16Const)
return true;
return false;
}
bool operator==(const int i) const
{
if (i == iConst)
return true;
return false;
}
bool operator==(const unsigned int u) const
{
if (u == uConst)
return true;
return false;
}
bool operator==(const long long i64) const
{
if (i64 == i64Const)
return true;
return false;
}
bool operator==(const unsigned long long u64) const
{
if (u64 == u64Const)
return true;
return false;
}
bool operator==(const double d) const
{
if (d == dConst)
return true;
return false;
}
bool operator==(const bool b) const
{
if (b == bConst)
return true;
return false;
}
bool operator==(const TConstUnion& constant) const
{
if (constant.type != type)
return false;
switch (type) {
case EbtInt:
if (constant.iConst == iConst)
return true;
break;
case EbtUint:
if (constant.uConst == uConst)
return true;
break;
case EbtBool:
if (constant.bConst == bConst)
return true;
break;
case EbtDouble:
if (constant.dConst == dConst)
return true;
break;
#ifndef GLSLANG_WEB
case EbtInt16:
if (constant.i16Const == i16Const)
return true;
break;
case EbtUint16:
if (constant.u16Const == u16Const)
return true;
break;
case EbtInt8:
if (constant.i8Const == i8Const)
return true;
break;
case EbtUint8:
if (constant.u8Const == u8Const)
return true;
break;
case EbtInt64:
if (constant.i64Const == i64Const)
return true;
break;
case EbtUint64:
if (constant.u64Const == u64Const)
return true;
break;
#endif
default:
assert(false && "Default missing");
}
return false;
}
bool operator!=(const signed char i) const
{
return !operator==(i);
}
bool operator!=(const unsigned char u) const
{
return !operator==(u);
}
bool operator!=(const signed short i) const
{
return !operator==(i);
}
bool operator!=(const unsigned short u) const
{
return !operator==(u);
}
bool operator!=(const int i) const
{
return !operator==(i);
}
bool operator!=(const unsigned int u) const
{
return !operator==(u);
}
bool operator!=(const long long i) const
{
return !operator==(i);
}
bool operator!=(const unsigned long long u) const
{
return !operator==(u);
}
bool operator!=(const float f) const
{
return !operator==(f);
}
bool operator!=(const bool b) const
{
return !operator==(b);
}
bool operator!=(const TConstUnion& constant) const
{
return !operator==(constant);
}
bool operator>(const TConstUnion& constant) const
{
assert(type == constant.type);
switch (type) {
case EbtInt:
if (iConst > constant.iConst)
return true;
return false;
case EbtUint:
if (uConst > constant.uConst)
return true;
return false;
case EbtDouble:
if (dConst > constant.dConst)
return true;
return false;
#ifndef GLSLANG_WEB
case EbtInt8:
if (i8Const > constant.i8Const)
return true;
return false;
case EbtUint8:
if (u8Const > constant.u8Const)
return true;
return false;
case EbtInt16:
if (i16Const > constant.i16Const)
return true;
return false;
case EbtUint16:
if (u16Const > constant.u16Const)
return true;
return false;
case EbtInt64:
if (i64Const > constant.i64Const)
return true;
return false;
case EbtUint64:
if (u64Const > constant.u64Const)
return true;
return false;
#endif
default:
assert(false && "Default missing");
return false;
}
}
bool operator<(const TConstUnion& constant) const
{
assert(type == constant.type);
switch (type) {
#ifndef GLSLANG_WEB
case EbtInt8:
if (i8Const < constant.i8Const)
return true;
return false;
case EbtUint8:
if (u8Const < constant.u8Const)
return true;
return false;
case EbtInt16:
if (i16Const < constant.i16Const)
return true;
return false;
case EbtUint16:
if (u16Const < constant.u16Const)
return true;
return false;
case EbtInt64:
if (i64Const < constant.i64Const)
return true;
return false;
case EbtUint64:
if (u64Const < constant.u64Const)
return true;
return false;
#endif
case EbtDouble:
if (dConst < constant.dConst)
return true;
return false;
case EbtInt:
if (iConst < constant.iConst)
return true;
return false;
case EbtUint:
if (uConst < constant.uConst)
return true;
return false;
default:
assert(false && "Default missing");
return false;
}
}
TConstUnion operator+(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst + constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst + constant.uConst); break;
case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const + constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const + constant.i16Const); break;
case EbtInt64: returnValue.setI64Const(i64Const + constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const + constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const + constant.u16Const); break;
case EbtUint64: returnValue.setU64Const(u64Const + constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator-(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst - constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst - constant.uConst); break;
case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const - constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const - constant.i16Const); break;
case EbtInt64: returnValue.setI64Const(i64Const - constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const - constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const - constant.u16Const); break;
case EbtUint64: returnValue.setU64Const(u64Const - constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator*(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst * constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const * constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const * constant.i16Const); break;
case EbtInt64: returnValue.setI64Const(i64Const * constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const * constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const * constant.u16Const); break;
case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator%(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst % constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst % constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const % constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const % constant.i16Const); break;
case EbtInt64: returnValue.setI64Const(i64Const % constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const % constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const % constant.u16Const); break;
case EbtUint64: returnValue.setU64Const(u64Const % constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator>>(const TConstUnion& constant) const
{
TConstUnion returnValue;
switch (type) {
#ifndef GLSLANG_WEB
case EbtInt8:
switch (constant.type) {
case EbtInt8: returnValue.setI8Const(i8Const >> constant.i8Const); break;
case EbtUint8: returnValue.setI8Const(i8Const >> constant.u8Const); break;
case EbtInt16: returnValue.setI8Const(i8Const >> constant.i16Const); break;
case EbtUint16: returnValue.setI8Const(i8Const >> constant.u16Const); break;
case EbtInt: returnValue.setI8Const(i8Const >> constant.iConst); break;
case EbtUint: returnValue.setI8Const(i8Const >> constant.uConst); break;
case EbtInt64: returnValue.setI8Const(i8Const >> constant.i64Const); break;
case EbtUint64: returnValue.setI8Const(i8Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint8:
switch (constant.type) {
case EbtInt8: returnValue.setU8Const(u8Const >> constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const >> constant.u8Const); break;
case EbtInt16: returnValue.setU8Const(u8Const >> constant.i16Const); break;
case EbtUint16: returnValue.setU8Const(u8Const >> constant.u16Const); break;
case EbtInt: returnValue.setU8Const(u8Const >> constant.iConst); break;
case EbtUint: returnValue.setU8Const(u8Const >> constant.uConst); break;
case EbtInt64: returnValue.setU8Const(u8Const >> constant.i64Const); break;
case EbtUint64: returnValue.setU8Const(u8Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt16:
switch (constant.type) {
case EbtInt8: returnValue.setI16Const(i16Const >> constant.i8Const); break;
case EbtUint8: returnValue.setI16Const(i16Const >> constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const >> constant.i16Const); break;
case EbtUint16: returnValue.setI16Const(i16Const >> constant.u16Const); break;
case EbtInt: returnValue.setI16Const(i16Const >> constant.iConst); break;
case EbtUint: returnValue.setI16Const(i16Const >> constant.uConst); break;
case EbtInt64: returnValue.setI16Const(i16Const >> constant.i64Const); break;
case EbtUint64: returnValue.setI16Const(i16Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint16:
switch (constant.type) {
case EbtInt8: returnValue.setU16Const(u16Const >> constant.i8Const); break;
case EbtUint8: returnValue.setU16Const(u16Const >> constant.u8Const); break;
case EbtInt16: returnValue.setU16Const(u16Const >> constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const >> constant.u16Const); break;
case EbtInt: returnValue.setU16Const(u16Const >> constant.iConst); break;
case EbtUint: returnValue.setU16Const(u16Const >> constant.uConst); break;
case EbtInt64: returnValue.setU16Const(u16Const >> constant.i64Const); break;
case EbtUint64: returnValue.setU16Const(u16Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
#endif
case EbtInt:
switch (constant.type) {
case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setIConst(iConst >> constant.i8Const); break;
case EbtUint8: returnValue.setIConst(iConst >> constant.u8Const); break;
case EbtInt16: returnValue.setIConst(iConst >> constant.i16Const); break;
case EbtUint16: returnValue.setIConst(iConst >> constant.u16Const); break;
case EbtInt64: returnValue.setIConst(iConst >> constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst >> constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setUConst(uConst >> constant.i8Const); break;
case EbtUint8: returnValue.setUConst(uConst >> constant.u8Const); break;
case EbtInt16: returnValue.setUConst(uConst >> constant.i16Const); break;
case EbtUint16: returnValue.setUConst(uConst >> constant.u16Const); break;
case EbtInt64: returnValue.setUConst(uConst >> constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst >> constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
#ifndef GLSLANG_WEB
case EbtInt64:
switch (constant.type) {
case EbtInt8: returnValue.setI64Const(i64Const >> constant.i8Const); break;
case EbtUint8: returnValue.setI64Const(i64Const >> constant.u8Const); break;
case EbtInt16: returnValue.setI64Const(i64Const >> constant.i16Const); break;
case EbtUint16: returnValue.setI64Const(i64Const >> constant.u16Const); break;
case EbtInt: returnValue.setI64Const(i64Const >> constant.iConst); break;
case EbtUint: returnValue.setI64Const(i64Const >> constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const >> constant.i64Const); break;
case EbtUint64: returnValue.setI64Const(i64Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint64:
switch (constant.type) {
case EbtInt8: returnValue.setU64Const(u64Const >> constant.i8Const); break;
case EbtUint8: returnValue.setU64Const(u64Const >> constant.u8Const); break;
case EbtInt16: returnValue.setU64Const(u64Const >> constant.i16Const); break;
case EbtUint16: returnValue.setU64Const(u64Const >> constant.u16Const); break;
case EbtInt: returnValue.setU64Const(u64Const >> constant.iConst); break;
case EbtUint: returnValue.setU64Const(u64Const >> constant.uConst); break;
case EbtInt64: returnValue.setU64Const(u64Const >> constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
#endif
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator<<(const TConstUnion& constant) const
{
TConstUnion returnValue;
switch (type) {
#ifndef GLSLANG_WEB
case EbtInt8:
switch (constant.type) {
case EbtInt8: returnValue.setI8Const(i8Const << constant.i8Const); break;
case EbtUint8: returnValue.setI8Const(i8Const << constant.u8Const); break;
case EbtInt16: returnValue.setI8Const(i8Const << constant.i16Const); break;
case EbtUint16: returnValue.setI8Const(i8Const << constant.u16Const); break;
case EbtInt: returnValue.setI8Const(i8Const << constant.iConst); break;
case EbtUint: returnValue.setI8Const(i8Const << constant.uConst); break;
case EbtInt64: returnValue.setI8Const(i8Const << constant.i64Const); break;
case EbtUint64: returnValue.setI8Const(i8Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint8:
switch (constant.type) {
case EbtInt8: returnValue.setU8Const(u8Const << constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const << constant.u8Const); break;
case EbtInt16: returnValue.setU8Const(u8Const << constant.i16Const); break;
case EbtUint16: returnValue.setU8Const(u8Const << constant.u16Const); break;
case EbtInt: returnValue.setU8Const(u8Const << constant.iConst); break;
case EbtUint: returnValue.setU8Const(u8Const << constant.uConst); break;
case EbtInt64: returnValue.setU8Const(u8Const << constant.i64Const); break;
case EbtUint64: returnValue.setU8Const(u8Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt16:
switch (constant.type) {
case EbtInt8: returnValue.setI16Const(i16Const << constant.i8Const); break;
case EbtUint8: returnValue.setI16Const(i16Const << constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const << constant.i16Const); break;
case EbtUint16: returnValue.setI16Const(i16Const << constant.u16Const); break;
case EbtInt: returnValue.setI16Const(i16Const << constant.iConst); break;
case EbtUint: returnValue.setI16Const(i16Const << constant.uConst); break;
case EbtInt64: returnValue.setI16Const(i16Const << constant.i64Const); break;
case EbtUint64: returnValue.setI16Const(i16Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint16:
switch (constant.type) {
case EbtInt8: returnValue.setU16Const(u16Const << constant.i8Const); break;
case EbtUint8: returnValue.setU16Const(u16Const << constant.u8Const); break;
case EbtInt16: returnValue.setU16Const(u16Const << constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const << constant.u16Const); break;
case EbtInt: returnValue.setU16Const(u16Const << constant.iConst); break;
case EbtUint: returnValue.setU16Const(u16Const << constant.uConst); break;
case EbtInt64: returnValue.setU16Const(u16Const << constant.i64Const); break;
case EbtUint64: returnValue.setU16Const(u16Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt64:
switch (constant.type) {
case EbtInt8: returnValue.setI64Const(i64Const << constant.i8Const); break;
case EbtUint8: returnValue.setI64Const(i64Const << constant.u8Const); break;
case EbtInt16: returnValue.setI64Const(i64Const << constant.i16Const); break;
case EbtUint16: returnValue.setI64Const(i64Const << constant.u16Const); break;
case EbtInt: returnValue.setI64Const(i64Const << constant.iConst); break;
case EbtUint: returnValue.setI64Const(i64Const << constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const << constant.i64Const); break;
case EbtUint64: returnValue.setI64Const(i64Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint64:
switch (constant.type) {
case EbtInt8: returnValue.setU64Const(u64Const << constant.i8Const); break;
case EbtUint8: returnValue.setU64Const(u64Const << constant.u8Const); break;
case EbtInt16: returnValue.setU64Const(u64Const << constant.i16Const); break;
case EbtUint16: returnValue.setU64Const(u64Const << constant.u16Const); break;
case EbtInt: returnValue.setU64Const(u64Const << constant.iConst); break;
case EbtUint: returnValue.setU64Const(u64Const << constant.uConst); break;
case EbtInt64: returnValue.setU64Const(u64Const << constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
#endif
case EbtInt:
switch (constant.type) {
case EbtInt: returnValue.setIConst(iConst << constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst << constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setIConst(iConst << constant.i8Const); break;
case EbtUint8: returnValue.setIConst(iConst << constant.u8Const); break;
case EbtInt16: returnValue.setIConst(iConst << constant.i16Const); break;
case EbtUint16: returnValue.setIConst(iConst << constant.u16Const); break;
case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt: returnValue.setUConst(uConst << constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst << constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setUConst(uConst << constant.i8Const); break;
case EbtUint8: returnValue.setUConst(uConst << constant.u8Const); break;
case EbtInt16: returnValue.setUConst(uConst << constant.i16Const); break;
case EbtUint16: returnValue.setUConst(uConst << constant.u16Const); break;
case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator&(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst & constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst & constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const & constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const & constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const & constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const & constant.u16Const); break;
case EbtInt64: returnValue.setI64Const(i64Const & constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const & constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator|(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst | constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst | constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const | constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const | constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const | constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const | constant.u16Const); break;
case EbtInt64: returnValue.setI64Const(i64Const | constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const | constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator^(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const ^ constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const ^ constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const ^ constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const ^ constant.u16Const); break;
case EbtInt64: returnValue.setI64Const(i64Const ^ constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const ^ constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator~() const
{
TConstUnion returnValue;
switch (type) {
case EbtInt: returnValue.setIConst(~iConst); break;
case EbtUint: returnValue.setUConst(~uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(~i8Const); break;
case EbtUint8: returnValue.setU8Const(~u8Const); break;
case EbtInt16: returnValue.setI16Const(~i16Const); break;
case EbtUint16: returnValue.setU16Const(~u16Const); break;
case EbtInt64: returnValue.setI64Const(~i64Const); break;
case EbtUint64: returnValue.setU64Const(~u64Const); break;
#endif
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator&&(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtBool: returnValue.setBConst(bConst && constant.bConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator||(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtBool: returnValue.setBConst(bConst || constant.bConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TBasicType getType() const { return type; }
private:
union {
signed char i8Const; // used for i8vec, scalar int8s
unsigned char u8Const; // used for u8vec, scalar uint8s
signed short i16Const; // used for i16vec, scalar int16s
unsigned short u16Const; // used for u16vec, scalar uint16s
int iConst; // used for ivec, scalar ints
unsigned int uConst; // used for uvec, scalar uints
long long i64Const; // used for i64vec, scalar int64s
unsigned long long u64Const; // used for u64vec, scalar uint64s
bool bConst; // used for bvec, scalar bools
double dConst; // used for vec, dvec, mat, dmat, scalar floats and doubles
const TString* sConst; // string constant
};
TBasicType type;
};
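A minimal sketch of how the intermediate representation might fold two scalar constants with these operators (foldSketch is a hypothetical wrapper; the values are arbitrary):
inline void foldSketch()
{
    TConstUnion a, b;
    a.setIConst(6);
    b.setIConst(7);
    TConstUnion product = a * b;      // holds 42, type stays EbtInt
    bool differ = (product != a);     // true
    (void)differ;
}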
// Encapsulate having a pointer to an array of TConstUnion,
// which only needs to be allocated if its size is going to be
// bigger than 0.
//
// One convenience is being able to use [] to go inside the array, instead
// of C++ assuming it as an array of pointers to vectors.
//
// General usage is that the size is known up front, and it is
// created once with the proper size.
//
class TConstUnionArray {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TConstUnionArray() : unionArray(nullptr) { }
virtual ~TConstUnionArray() { }
explicit TConstUnionArray(int size)
{
if (size == 0)
unionArray = nullptr;
else
unionArray = new TConstUnionVector(size);
}
TConstUnionArray(const TConstUnionArray& a) = default;
TConstUnionArray(const TConstUnionArray& a, int start, int size)
{
unionArray = new TConstUnionVector(size);
for (int i = 0; i < size; ++i)
(*unionArray)[i] = a[start + i];
}
// Use this constructor for a smear operation
TConstUnionArray(int size, const TConstUnion& val)
{
unionArray = new TConstUnionVector(size, val);
}
int size() const { return unionArray ? (int)unionArray->size() : 0; }
TConstUnion& operator[](size_t index) { return (*unionArray)[index]; }
const TConstUnion& operator[](size_t index) const { return (*unionArray)[index]; }
bool operator==(const TConstUnionArray& rhs) const
{
// this includes the case that both are unallocated
if (unionArray == rhs.unionArray)
return true;
if (! unionArray || ! rhs.unionArray)
return false;
return *unionArray == *rhs.unionArray;
}
bool operator!=(const TConstUnionArray& rhs) const { return ! operator==(rhs); }
double dot(const TConstUnionArray& rhs)
{
assert(rhs.unionArray->size() == unionArray->size());
double sum = 0.0;
for (size_t comp = 0; comp < unionArray->size(); ++comp)
sum += (*this)[comp].getDConst() * rhs[comp].getDConst();
return sum;
}
bool empty() const { return unionArray == nullptr; }
protected:
typedef TVector<TConstUnion> TConstUnionVector;
TConstUnionVector* unionArray;
};
} // end namespace glslang
#endif // _CONSTANT_UNION_INCLUDED_
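A brief sketch of the smear constructor and dot product described above (smearSketch is a hypothetical wrapper; an active thread pool allocator is assumed, since the array's backing vector is pool-allocated):
inline void smearSketch()
{
    glslang::TConstUnion half;
    half.setDConst(0.5);
    glslang::TConstUnionArray v(4, half);   // smear constructor: four copies of 0.5
    double lengthSquared = v.dot(v);        // 4 * 0.25 = 1.0
    (void)lengthSquared;
}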

View File

@ -0,0 +1,144 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _INFOSINK_INCLUDED_
#define _INFOSINK_INCLUDED_
#include "../Include/Common.h"
#include <cmath>
namespace glslang {
//
// TPrefixType is used to centralize how info log messages start.
// See below.
//
enum TPrefixType {
EPrefixNone,
EPrefixWarning,
EPrefixError,
EPrefixInternalError,
EPrefixUnimplemented,
EPrefixNote
};
enum TOutputStream {
ENull = 0,
EDebugger = 0x01,
EStdOut = 0x02,
EString = 0x04,
};
//
// Encapsulate info logs for all objects that have them.
//
// The methods are a general set of tools for getting a variety of
// messages and types inserted into the log.
//
class TInfoSinkBase {
public:
TInfoSinkBase() : outputStream(4) {}
void erase() { sink.erase(); }
TInfoSinkBase& operator<<(const TPersistString& t) { append(t); return *this; }
TInfoSinkBase& operator<<(char c) { append(1, c); return *this; }
TInfoSinkBase& operator<<(const char* s) { append(s); return *this; }
TInfoSinkBase& operator<<(int n) { append(String(n)); return *this; }
TInfoSinkBase& operator<<(unsigned int n) { append(String(n)); return *this; }
TInfoSinkBase& operator<<(float n) { const int size = 40; char buf[size];
snprintf(buf, size, (fabs(n) > 1e-8 && fabs(n) < 1e8) || n == 0.0f ? "%f" : "%g", n);
append(buf);
return *this; }
TInfoSinkBase& operator+(const TPersistString& t) { append(t); return *this; }
TInfoSinkBase& operator+(const TString& t) { append(t); return *this; }
TInfoSinkBase& operator<<(const TString& t) { append(t); return *this; }
TInfoSinkBase& operator+(const char* s) { append(s); return *this; }
const char* c_str() const { return sink.c_str(); }
void prefix(TPrefixType message) {
switch(message) {
case EPrefixNone: break;
case EPrefixWarning: append("WARNING: "); break;
case EPrefixError: append("ERROR: "); break;
case EPrefixInternalError: append("INTERNAL ERROR: "); break;
case EPrefixUnimplemented: append("UNIMPLEMENTED: "); break;
case EPrefixNote: append("NOTE: "); break;
default: append("UNKNOWN ERROR: "); break;
}
}
void location(const TSourceLoc& loc) {
const int maxSize = 24;
char locText[maxSize];
snprintf(locText, maxSize, ":%d", loc.line);
append(loc.getStringNameOrNum(false).c_str());
append(locText);
append(": ");
}
void message(TPrefixType message, const char* s) {
prefix(message);
append(s);
append("\n");
}
void message(TPrefixType message, const char* s, const TSourceLoc& loc) {
prefix(message);
location(loc);
append(s);
append("\n");
}
void setOutputStream(int output = 4)
{
outputStream = output;
}
protected:
void append(const char* s);
void append(int count, char c);
void append(const TPersistString& t);
void append(const TString& t);
void checkMem(size_t growth) { if (sink.capacity() < sink.size() + growth + 2)
sink.reserve(sink.capacity() + sink.capacity() / 2); }
void appendToStream(const char* s);
TPersistString sink;
int outputStream;
};
} // end namespace glslang
class TInfoSink {
public:
glslang::TInfoSinkBase info;
glslang::TInfoSinkBase debug;
};
#endif // _INFOSINK_INCLUDED_
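A sketch of emitting diagnostics through the two sinks, assuming a TSourceLoc filled in elsewhere (sinkSketch is a hypothetical helper):
// Illustrative only: how a front end might route a warning and a debug note.
inline void sinkSketch(TInfoSink& sinks, const glslang::TSourceLoc& loc)
{
    sinks.info.message(glslang::EPrefixWarning,
                       "implicit conversion may lose precision", loc);
    sinks.debug << "visited node at line " << loc.line << "\n";
    // sinks.info.c_str() later yields the accumulated log text.
}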

View File

@ -0,0 +1,44 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef __INITIALIZE_GLOBALS_INCLUDED_
#define __INITIALIZE_GLOBALS_INCLUDED_
namespace glslang {
bool InitializePoolIndex();
} // end namespace glslang
#endif // __INITIALIZE_GLOBALS_INCLUDED_


@ -0,0 +1,318 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _POOLALLOC_INCLUDED_
#define _POOLALLOC_INCLUDED_
#ifdef _DEBUG
# define GUARD_BLOCKS // define to enable guard block sanity checking
#endif
//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
// typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument.
//
#include <cstddef>
#include <cstring>
#include <vector>
namespace glslang {
// If we are using guard blocks, we must track each individual
// allocation. If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//
class TAllocation {
public:
TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
size(size), mem(mem), prevAlloc(prev) {
// Allocations are bracketed:
// [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
// This would be cleaner with if (guardBlockSize)..., but that
// makes the compiler print warnings about 0 length memsets,
// even with the if() protecting them.
# ifdef GUARD_BLOCKS
memset(preGuard(), guardBlockBeginVal, guardBlockSize);
memset(data(), userDataFill, size);
memset(postGuard(), guardBlockEndVal, guardBlockSize);
# endif
}
void check() const {
checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
checkGuardBlock(postGuard(), guardBlockEndVal, "after");
}
void checkAllocList() const;
// Return total size needed to accommodate user buffer of 'size',
// plus our tracking data.
inline static size_t allocationSize(size_t size) {
return size + 2 * guardBlockSize + headerSize();
}
// Offset from surrounding buffer to get to user data buffer.
inline static unsigned char* offsetAllocation(unsigned char* m) {
return m + guardBlockSize + headerSize();
}
private:
void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;
// Find offsets to pre and post guard blocks, and user data buffer
unsigned char* preGuard() const { return mem + headerSize(); }
unsigned char* data() const { return preGuard() + guardBlockSize; }
unsigned char* postGuard() const { return data() + size; }
size_t size; // size of the user data area
unsigned char* mem; // beginning of our allocation (pts to header)
TAllocation* prevAlloc; // prior allocation in the chain
const static unsigned char guardBlockBeginVal;
const static unsigned char guardBlockEndVal;
const static unsigned char userDataFill;
const static size_t guardBlockSize;
# ifdef GUARD_BLOCKS
inline static size_t headerSize() { return sizeof(TAllocation); }
# else
inline static size_t headerSize() { return 0; }
# endif
};
//
// There are several stacks. One is to track the pushing and popping
// of the user, and is not yet implemented. The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS. Multi-page allocations
// are returned to the OS. Individual page allocations are kept for future
// re-use.
//
// The "page size" used is not, nor must it match, the underlying OS
// page size. But, having it be about that size or equal to a set of
// pages is likely most optimal.
//
class TPoolAllocator {
public:
TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);
//
// Don't call the destructor just to free up the memory, call pop()
//
~TPoolAllocator();
//
// Call push() to establish a new place to pop memory to. Does not
// have to be called to get things started.
//
void push();
//
// Call pop() to free all memory allocated since the last call to push(),
// or if no last call to push, frees all memory since first allocation.
//
void pop();
//
// Call popAll() to free all memory allocated.
//
void popAll();
//
// Call allocate() to actually acquire memory. Returns 0 if no memory
// available, otherwise a properly aligned pointer to 'numBytes' of memory.
//
void* allocate(size_t numBytes);
//
// There is no deallocate. The point of this class is that
// deallocation can be skipped by the user of it, as the model
// of use is to simultaneously deallocate everything at once
// by calling pop(), and to not have to solve memory leak problems.
//
protected:
friend struct tHeader;
struct tHeader {
tHeader(tHeader* nextPage, size_t pageCount) :
#ifdef GUARD_BLOCKS
lastAllocation(0),
#endif
nextPage(nextPage), pageCount(pageCount) { }
~tHeader() {
#ifdef GUARD_BLOCKS
if (lastAllocation)
lastAllocation->checkAllocList();
#endif
}
#ifdef GUARD_BLOCKS
TAllocation* lastAllocation;
#endif
tHeader* nextPage;
size_t pageCount;
};
struct tAllocState {
size_t offset;
tHeader* page;
};
typedef std::vector<tAllocState> tAllocStack;
// Track allocations if and only if we're using guard blocks
#ifndef GUARD_BLOCKS
void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
#else
void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
new(memory) TAllocation(numBytes, memory, block->lastAllocation);
block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
// This is optimized entirely away if GUARD_BLOCKS is not defined.
return TAllocation::offsetAllocation(memory);
}
size_t pageSize; // granularity of allocation from the OS
size_t alignment; // all returned allocations will be aligned at
// this granularity, which will be a power of 2
size_t alignmentMask;
size_t headerSkip; // amount of memory to skip to make room for the
// header (basically, size of header, rounded
// up to make it aligned)
size_t currentPageOffset; // next offset in top of inUseList to allocate from
tHeader* freeList; // list of popped memory
tHeader* inUseList; // list of all memory currently being used
tAllocStack stack; // stack of where to allocate from, to partition pool
int numCalls; // just an interesting statistic
size_t totalBytes; // just an interesting statistic
private:
TPoolAllocator& operator=(const TPoolAllocator&); // don't allow assignment operator
TPoolAllocator(const TPoolAllocator&); // don't allow default copy constructor
};
//
// There could potentially be many pools with pops happening at
// different times. But a simple use is to have a global pop
// with everyone using the same global allocator.
//
extern TPoolAllocator& GetThreadPoolAllocator();
void SetThreadPoolAllocator(TPoolAllocator* poolAllocator);
//
// This STL compatible allocator is intended to be used as the allocator
// parameter to templatized STL containers, like vector and map.
//
// It will use the pools for allocation, and not
// do any deallocation, but will still do destruction.
//
template<class T>
class pool_allocator {
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T *pointer;
typedef const T *const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
template<class Other>
struct rebind {
typedef pool_allocator<Other> other;
};
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
pool_allocator() : allocator(GetThreadPoolAllocator()) { }
pool_allocator(TPoolAllocator& a) : allocator(a) { }
pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }
template<class Other>
pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }
pointer allocate(size_type n) {
return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
pointer allocate(size_type n, const void*) {
return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
void deallocate(void*, size_type) { }
void deallocate(pointer, size_type) { }
pointer _Charalloc(size_t n) {
return reinterpret_cast<pointer>(getAllocator().allocate(n)); }
void construct(pointer p, const T& val) { new ((void *)p) T(val); }
void destroy(pointer p) { p->T::~T(); }
bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }
size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
TPoolAllocator& getAllocator() const { return allocator; }
pool_allocator select_on_container_copy_construction() const { return pool_allocator{}; }
protected:
pool_allocator& operator=(const pool_allocator&) { return *this; }
TPoolAllocator& allocator;
};
} // end namespace glslang
#endif // _POOLALLOC_INCLUDED_
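// Illustrative usage sketch (not part of the original header): the push/pop
// model and the STL adaptor described above. It assumes a pool has already
// been installed for this thread (e.g. via SetThreadPoolAllocator()); the
// function name is invented for the example, and memory is reclaimed only by
// pop(), never by individual frees.
#include <vector>
inline void poolAllocatorUsageSketch()
{
    glslang::TPoolAllocator& pool = glslang::GetThreadPoolAllocator();
    pool.push();                                           // open an allocation scope
    void* scratch = pool.allocate(256);                    // trivial bump allocation
    (void)scratch;                                         // never freed individually
    {
        std::vector<int, glslang::pool_allocator<int>> v;  // container backed by the pool
        v.push_back(42);
    }                                                      // v is destroyed, but its memory stays in the pool
    pool.pop();                                            // releases everything allocated since push()
}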


@ -0,0 +1,150 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _RESOURCE_LIMITS_INCLUDED_
#define _RESOURCE_LIMITS_INCLUDED_
struct TLimits {
bool nonInductiveForLoops;
bool whileLoops;
bool doWhileLoops;
bool generalUniformIndexing;
bool generalAttributeMatrixVectorIndexing;
bool generalVaryingIndexing;
bool generalSamplerIndexing;
bool generalVariableIndexing;
bool generalConstantMatrixVectorIndexing;
};
struct TBuiltInResource {
int maxLights;
int maxClipPlanes;
int maxTextureUnits;
int maxTextureCoords;
int maxVertexAttribs;
int maxVertexUniformComponents;
int maxVaryingFloats;
int maxVertexTextureImageUnits;
int maxCombinedTextureImageUnits;
int maxTextureImageUnits;
int maxFragmentUniformComponents;
int maxDrawBuffers;
int maxVertexUniformVectors;
int maxVaryingVectors;
int maxFragmentUniformVectors;
int maxVertexOutputVectors;
int maxFragmentInputVectors;
int minProgramTexelOffset;
int maxProgramTexelOffset;
int maxClipDistances;
int maxComputeWorkGroupCountX;
int maxComputeWorkGroupCountY;
int maxComputeWorkGroupCountZ;
int maxComputeWorkGroupSizeX;
int maxComputeWorkGroupSizeY;
int maxComputeWorkGroupSizeZ;
int maxComputeUniformComponents;
int maxComputeTextureImageUnits;
int maxComputeImageUniforms;
int maxComputeAtomicCounters;
int maxComputeAtomicCounterBuffers;
int maxVaryingComponents;
int maxVertexOutputComponents;
int maxGeometryInputComponents;
int maxGeometryOutputComponents;
int maxFragmentInputComponents;
int maxImageUnits;
int maxCombinedImageUnitsAndFragmentOutputs;
int maxCombinedShaderOutputResources;
int maxImageSamples;
int maxVertexImageUniforms;
int maxTessControlImageUniforms;
int maxTessEvaluationImageUniforms;
int maxGeometryImageUniforms;
int maxFragmentImageUniforms;
int maxCombinedImageUniforms;
int maxGeometryTextureImageUnits;
int maxGeometryOutputVertices;
int maxGeometryTotalOutputComponents;
int maxGeometryUniformComponents;
int maxGeometryVaryingComponents;
int maxTessControlInputComponents;
int maxTessControlOutputComponents;
int maxTessControlTextureImageUnits;
int maxTessControlUniformComponents;
int maxTessControlTotalOutputComponents;
int maxTessEvaluationInputComponents;
int maxTessEvaluationOutputComponents;
int maxTessEvaluationTextureImageUnits;
int maxTessEvaluationUniformComponents;
int maxTessPatchComponents;
int maxPatchVertices;
int maxTessGenLevel;
int maxViewports;
int maxVertexAtomicCounters;
int maxTessControlAtomicCounters;
int maxTessEvaluationAtomicCounters;
int maxGeometryAtomicCounters;
int maxFragmentAtomicCounters;
int maxCombinedAtomicCounters;
int maxAtomicCounterBindings;
int maxVertexAtomicCounterBuffers;
int maxTessControlAtomicCounterBuffers;
int maxTessEvaluationAtomicCounterBuffers;
int maxGeometryAtomicCounterBuffers;
int maxFragmentAtomicCounterBuffers;
int maxCombinedAtomicCounterBuffers;
int maxAtomicCounterBufferSize;
int maxTransformFeedbackBuffers;
int maxTransformFeedbackInterleavedComponents;
int maxCullDistances;
int maxCombinedClipAndCullDistances;
int maxSamples;
int maxMeshOutputVerticesNV;
int maxMeshOutputPrimitivesNV;
int maxMeshWorkGroupSizeX_NV;
int maxMeshWorkGroupSizeY_NV;
int maxMeshWorkGroupSizeZ_NV;
int maxTaskWorkGroupSizeX_NV;
int maxTaskWorkGroupSizeY_NV;
int maxTaskWorkGroupSizeZ_NV;
int maxMeshViewCountNV;
int maxDualSourceDrawBuffersEXT;
TLimits limits;
};
#endif // _RESOURCE_LIMITS_INCLUDED_
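// Illustrative sketch (not part of the original header): filling in a few of
// the limits before handing the structure to a compiler front end. The helper
// name and the chosen values are arbitrary examples, not recommended defaults.
inline TBuiltInResource makeExampleResources()
{
    TBuiltInResource resources = {};              // zero every limit first
    resources.maxDrawBuffers = 8;
    resources.maxCombinedTextureImageUnits = 80;
    resources.maxSamples = 4;
    resources.limits.whileLoops = true;
    resources.limits.generalVariableIndexing = true;
    return resources;
}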


@ -0,0 +1,176 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _SHHANDLE_INCLUDED_
#define _SHHANDLE_INCLUDED_
//
// Machine independent part of the compiler private objects
// sent as ShHandle to the driver.
//
// This should not be included by driver code.
//
#define SH_EXPORTING
#include "../Public/ShaderLang.h"
#include "../MachineIndependent/Versions.h"
#include "InfoSink.h"
class TCompiler;
class TLinker;
class TUniformMap;
//
// The base class used to back handles returned to the driver.
//
class TShHandleBase {
public:
TShHandleBase() { pool = new glslang::TPoolAllocator; }
virtual ~TShHandleBase() { delete pool; }
virtual TCompiler* getAsCompiler() { return 0; }
virtual TLinker* getAsLinker() { return 0; }
virtual TUniformMap* getAsUniformMap() { return 0; }
virtual glslang::TPoolAllocator* getPool() const { return pool; }
private:
glslang::TPoolAllocator* pool;
};
//
// The base class for the machine dependent linker to derive from
// for managing where uniforms live.
//
class TUniformMap : public TShHandleBase {
public:
TUniformMap() { }
virtual ~TUniformMap() { }
virtual TUniformMap* getAsUniformMap() { return this; }
virtual int getLocation(const char* name) = 0;
virtual TInfoSink& getInfoSink() { return infoSink; }
TInfoSink infoSink;
};
class TIntermNode;
//
// The base class for the machine dependent compiler to derive from
// for managing object code from the compile.
//
class TCompiler : public TShHandleBase {
public:
TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink) , language(l), haveValidObjectCode(false) { }
virtual ~TCompiler() { }
EShLanguage getLanguage() { return language; }
virtual TInfoSink& getInfoSink() { return infoSink; }
virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile) = 0;
virtual TCompiler* getAsCompiler() { return this; }
virtual bool linkable() { return haveValidObjectCode; }
TInfoSink& infoSink;
protected:
TCompiler& operator=(TCompiler&);
EShLanguage language;
bool haveValidObjectCode;
};
//
// Link operations are based on a list of compile results...
//
typedef glslang::TVector<TCompiler*> TCompilerList;
typedef glslang::TVector<TShHandleBase*> THandleList;
//
// The base class for the machine dependent linker to derive from
// to manage the resulting executable.
//
class TLinker : public TShHandleBase {
public:
TLinker(EShExecutable e, TInfoSink& iSink) :
infoSink(iSink),
executable(e),
haveReturnableObjectCode(false),
appAttributeBindings(0),
fixedAttributeBindings(0),
excludedAttributes(0),
excludedCount(0),
uniformBindings(0) { }
virtual TLinker* getAsLinker() { return this; }
virtual ~TLinker() { }
virtual bool link(TCompilerList&, TUniformMap*) = 0;
virtual bool link(THandleList&) { return false; }
virtual void setAppAttributeBindings(const ShBindingTable* t) { appAttributeBindings = t; }
virtual void setFixedAttributeBindings(const ShBindingTable* t) { fixedAttributeBindings = t; }
virtual void getAttributeBindings(ShBindingTable const **t) const = 0;
virtual void setExcludedAttributes(const int* attributes, int count) { excludedAttributes = attributes; excludedCount = count; }
virtual ShBindingTable* getUniformBindings() const { return uniformBindings; }
virtual const void* getObjectCode() const { return 0; } // a real compiler would be returning object code here
virtual TInfoSink& getInfoSink() { return infoSink; }
TInfoSink& infoSink;
protected:
TLinker& operator=(TLinker&);
EShExecutable executable;
bool haveReturnableObjectCode; // true when objectCode is acceptable to send to driver
const ShBindingTable* appAttributeBindings;
const ShBindingTable* fixedAttributeBindings;
const int* excludedAttributes;
int excludedCount;
ShBindingTable* uniformBindings; // created by the linker
};
//
// This is the interface between the machine independent code
// and the machine dependent code.
//
// The machine dependent code should derive from the classes
// above. Then Construct*() and Delete*() will create and
// destroy the machine dependent objects, which contain the
// above machine independent information.
//
TCompiler* ConstructCompiler(EShLanguage, int);
TShHandleBase* ConstructLinker(EShExecutable, int);
TShHandleBase* ConstructBindings();
void DeleteLinker(TShHandleBase*);
void DeleteBindingList(TShHandleBase* bindingList);
TUniformMap* ConstructUniformMap();
void DeleteCompiler(TCompiler*);
void DeleteUniformMap(TUniformMap*);
#endif // _SHHANDLE_INCLUDED_
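// Illustrative sketch (not part of the original header): the shape of a
// machine dependent compiler that a ConstructCompiler() implementation could
// return. The class name and body are invented for the example; a real back
// end would lower the intermediate tree inside compile().
class TExampleCompiler : public TCompiler {
public:
    TExampleCompiler(EShLanguage l, TInfoSink& sink) : TCompiler(l, sink) { }
    bool compile(TIntermNode* /*root*/, int /*version*/, EProfile /*profile*/) override
    {
        haveValidObjectCode = true;    // pretend object code was produced
        return haveValidObjectCode;
    }
};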


@ -0,0 +1,128 @@
//
// Copyright(C) 2021 Advanced Micro Devices, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#ifndef GLSLANG_WEB
//
// GL_EXT_spirv_intrinsics
//
#include "Common.h"
namespace glslang {
class TIntermTyped;
class TIntermConstantUnion;
class TType;
// SPIR-V requirements
struct TSpirvRequirement {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
// extension = [..]
TSet<TString> extensions;
// capability = [..]
TSet<int> capabilities;
};
// SPIR-V execution modes
struct TSpirvExecutionMode {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
// spirv_execution_mode
TMap<int, TVector<const TIntermConstantUnion*>> modes;
// spirv_execution_mode_id
TMap<int, TVector<const TIntermTyped*> > modeIds;
};
// SPIR-V decorations
struct TSpirvDecorate {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
// spirv_decorate
TMap<int, TVector<const TIntermConstantUnion*> > decorates;
// spirv_decorate_id
TMap<int, TVector<const TIntermTyped*>> decorateIds;
// spirv_decorate_string
TMap<int, TVector<const TIntermConstantUnion*> > decorateStrings;
};
// SPIR-V instruction
struct TSpirvInstruction {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TSpirvInstruction() { set = ""; id = -1; }
bool operator==(const TSpirvInstruction& rhs) const { return set == rhs.set && id == rhs.id; }
bool operator!=(const TSpirvInstruction& rhs) const { return !operator==(rhs); }
// spirv_instruction
TString set;
int id;
};
// SPIR-V type parameter
struct TSpirvTypeParameter {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TSpirvTypeParameter(const TIntermConstantUnion* arg) { constant = arg; }
bool operator==(const TSpirvTypeParameter& rhs) const { return constant == rhs.constant; }
bool operator!=(const TSpirvTypeParameter& rhs) const { return !operator==(rhs); }
const TIntermConstantUnion* constant;
};
typedef TVector<TSpirvTypeParameter> TSpirvTypeParameters;
// SPIR-V type
struct TSpirvType {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
bool operator==(const TSpirvType& rhs) const
{
return spirvInst == rhs.spirvInst && typeParams == rhs.typeParams;
}
bool operator!=(const TSpirvType& rhs) const { return !operator==(rhs); }
// spirv_type
TSpirvInstruction spirvInst;
TSpirvTypeParameters typeParams;
};
} // end namespace glslang
#endif // GLSLANG_WEB

File diff suppressed because it is too large


@ -0,0 +1,341 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// Implement types for tracking GLSL arrays, arrays of arrays, etc.
//
#ifndef _ARRAYS_INCLUDED
#define _ARRAYS_INCLUDED
#include <algorithm>
namespace glslang {
// This is used to mean there is no size yet (unsized); it is waiting to get a size from somewhere else.
const int UnsizedArraySize = 0;
class TIntermTyped;
extern bool SameSpecializationConstants(TIntermTyped*, TIntermTyped*);
// Specialization constants need both a nominal size and a node that defines
// the specialization constant being used. Array types are the same when their
// size and specialization constant nodes are the same.
struct TArraySize {
unsigned int size;
TIntermTyped* node; // nullptr means no specialization constant node
bool operator==(const TArraySize& rhs) const
{
if (size != rhs.size)
return false;
if (node == nullptr || rhs.node == nullptr)
return node == rhs.node;
return SameSpecializationConstants(node, rhs.node);
}
};
//
// TSmallArrayVector is used as the container for the set of sizes in TArraySizes.
// It has generic-container semantics, while TArraySizes has array-of-array semantics.
// That is, TSmallArrayVector should be more focused on mechanism and TArraySizes on policy.
//
struct TSmallArrayVector {
//
// TODO: memory: TSmallArrayVector is intended to be smaller.
// Almost all arrays could be handled by two sizes each fitting
// in 16 bits, needing a real vector only in the cases where there
// are more than 3 sizes or a size needing more than 16 bits.
//
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TSmallArrayVector() : sizes(nullptr) { }
virtual ~TSmallArrayVector() { dealloc(); }
// For breaking into two non-shared copies, independently modifiable.
TSmallArrayVector& operator=(const TSmallArrayVector& from)
{
if (from.sizes == nullptr)
sizes = nullptr;
else {
alloc();
*sizes = *from.sizes;
}
return *this;
}
int size() const
{
if (sizes == nullptr)
return 0;
return (int)sizes->size();
}
unsigned int frontSize() const
{
assert(sizes != nullptr && sizes->size() > 0);
return sizes->front().size;
}
TIntermTyped* frontNode() const
{
assert(sizes != nullptr && sizes->size() > 0);
return sizes->front().node;
}
void changeFront(unsigned int s)
{
assert(sizes != nullptr);
// this should only happen for implicitly sized arrays, not specialization constants
assert(sizes->front().node == nullptr);
sizes->front().size = s;
}
void push_back(unsigned int e, TIntermTyped* n)
{
alloc();
TArraySize pair = { e, n };
sizes->push_back(pair);
}
void push_back(const TSmallArrayVector& newDims)
{
alloc();
sizes->insert(sizes->end(), newDims.sizes->begin(), newDims.sizes->end());
}
void pop_front()
{
assert(sizes != nullptr && sizes->size() > 0);
if (sizes->size() == 1)
dealloc();
else
sizes->erase(sizes->begin());
}
// 'this' should currently not be holding anything, and copyNonFront
// will make it hold a copy of all but the first element of rhs.
// (This would be useful for making a type that is dereferenced by
// one dimension.)
void copyNonFront(const TSmallArrayVector& rhs)
{
assert(sizes == nullptr);
if (rhs.size() > 1) {
alloc();
sizes->insert(sizes->begin(), rhs.sizes->begin() + 1, rhs.sizes->end());
}
}
unsigned int getDimSize(int i) const
{
assert(sizes != nullptr && (int)sizes->size() > i);
return (*sizes)[i].size;
}
void setDimSize(int i, unsigned int size) const
{
assert(sizes != nullptr && (int)sizes->size() > i);
assert((*sizes)[i].node == nullptr);
(*sizes)[i].size = size;
}
TIntermTyped* getDimNode(int i) const
{
assert(sizes != nullptr && (int)sizes->size() > i);
return (*sizes)[i].node;
}
bool operator==(const TSmallArrayVector& rhs) const
{
if (sizes == nullptr && rhs.sizes == nullptr)
return true;
if (sizes == nullptr || rhs.sizes == nullptr)
return false;
return *sizes == *rhs.sizes;
}
bool operator!=(const TSmallArrayVector& rhs) const { return ! operator==(rhs); }
protected:
TSmallArrayVector(const TSmallArrayVector&);
void alloc()
{
if (sizes == nullptr)
sizes = new TVector<TArraySize>;
}
void dealloc()
{
delete sizes;
sizes = nullptr;
}
TVector<TArraySize>* sizes; // will either hold such a pointer, or in the future, hold the two array sizes
};
//
// Represent an array, or array of arrays, to arbitrary depth. This is not
// done through a hierarchy of types in a type tree, rather all contiguous arrayness
// in the type hierarchy is localized into this single cumulative object.
//
// The arrayness in TType is a pointer, so that it can be non-allocated and zero
// for the vast majority of types that are non-array types.
//
// Order Policy: these are all identical:
// - left to right order within a contiguous set of ...[..][..][..]... in the source language
// - index order 0, 1, 2, ... within the 'sizes' member below
// - outer-most to inner-most
//
struct TArraySizes {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TArraySizes() : implicitArraySize(1), variablyIndexed(false) { }
// For breaking into two non-shared copies, independently modifiable.
TArraySizes& operator=(const TArraySizes& from)
{
implicitArraySize = from.implicitArraySize;
variablyIndexed = from.variablyIndexed;
sizes = from.sizes;
return *this;
}
// translate from array-of-array semantics to container semantics
int getNumDims() const { return sizes.size(); }
int getDimSize(int dim) const { return sizes.getDimSize(dim); }
TIntermTyped* getDimNode(int dim) const { return sizes.getDimNode(dim); }
void setDimSize(int dim, int size) { sizes.setDimSize(dim, size); }
int getOuterSize() const { return sizes.frontSize(); }
TIntermTyped* getOuterNode() const { return sizes.frontNode(); }
int getCumulativeSize() const
{
int size = 1;
for (int d = 0; d < sizes.size(); ++d) {
// this only makes sense in paths that have a known array size
assert(sizes.getDimSize(d) != UnsizedArraySize);
size *= sizes.getDimSize(d);
}
return size;
}
void addInnerSize() { addInnerSize((unsigned)UnsizedArraySize); }
void addInnerSize(int s) { addInnerSize((unsigned)s, nullptr); }
void addInnerSize(int s, TIntermTyped* n) { sizes.push_back((unsigned)s, n); }
void addInnerSize(TArraySize pair) {
sizes.push_back(pair.size, pair.node);
}
void addInnerSizes(const TArraySizes& s) { sizes.push_back(s.sizes); }
void changeOuterSize(int s) { sizes.changeFront((unsigned)s); }
int getImplicitSize() const { return implicitArraySize; }
void updateImplicitSize(int s) { implicitArraySize = std::max(implicitArraySize, s); }
bool isInnerUnsized() const
{
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
return true;
}
return false;
}
bool clearInnerUnsized()
{
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
setDimSize(d, 1);
}
return false;
}
bool isInnerSpecialization() const
{
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimNode(d) != nullptr)
return true;
}
return false;
}
bool isOuterSpecialization()
{
return sizes.getDimNode(0) != nullptr;
}
bool hasUnsized() const { return getOuterSize() == UnsizedArraySize || isInnerUnsized(); }
bool isSized() const { return getOuterSize() != UnsizedArraySize; }
void dereference() { sizes.pop_front(); }
void copyDereferenced(const TArraySizes& rhs)
{
assert(sizes.size() == 0);
if (rhs.sizes.size() > 1)
sizes.copyNonFront(rhs.sizes);
}
bool sameInnerArrayness(const TArraySizes& rhs) const
{
if (sizes.size() != rhs.sizes.size())
return false;
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimSize(d) != rhs.sizes.getDimSize(d) ||
sizes.getDimNode(d) != rhs.sizes.getDimNode(d))
return false;
}
return true;
}
void setVariablyIndexed() { variablyIndexed = true; }
bool isVariablyIndexed() const { return variablyIndexed; }
bool operator==(const TArraySizes& rhs) const { return sizes == rhs.sizes; }
bool operator!=(const TArraySizes& rhs) const { return sizes != rhs.sizes; }
protected:
TSmallArrayVector sizes;
TArraySizes(const TArraySizes&);
// For tracking maximum referenced compile-time constant index.
// Applies only to the outer-most dimension. Potentially becomes
// the implicit size of the array, if not variably indexed and
// otherwise legal.
int implicitArraySize;
bool variablyIndexed; // true if array is indexed with a non compile-time constant
};
} // end namespace glslang
#endif // _ARRAYS_INCLUDED
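// Illustrative sketch (not part of the original header): building the shape
// of something like "float a[3][5]" and dereferencing it by one dimension.
// The function name is invented, and the thread's pool allocator is assumed
// to be installed, since the members use POOL_ALLOCATOR_NEW_DELETE-backed
// containers.
inline void arraySizesUsageSketch()
{
    glslang::TArraySizes shape;
    shape.addInnerSize(3);                 // outer-most dimension first (index 0)
    shape.addInnerSize(5);                 // then the inner dimension
    int dims  = shape.getNumDims();        // 2
    int outer = shape.getOuterSize();      // 3
    int total = shape.getCumulativeSize(); // 15, valid because both sizes are known
    shape.dereference();                   // drop the outer dimension -> "float[5]"
    (void)dims; (void)outer; (void)total;
}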


@ -0,0 +1,254 @@
/**
This code is based on the glslang_c_interface implementation by Viktor Latypov
**/
/**
BSD 2-Clause License
Copyright (c) 2019, Viktor Latypov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#ifndef GLSLANG_C_IFACE_H_INCLUDED
#define GLSLANG_C_IFACE_H_INCLUDED
#include <stdbool.h>
#include <stdlib.h>
#include "glslang_c_shader_types.h"
typedef struct glslang_shader_s glslang_shader_t;
typedef struct glslang_program_s glslang_program_t;
/* TLimits counterpart */
typedef struct glslang_limits_s {
bool non_inductive_for_loops;
bool while_loops;
bool do_while_loops;
bool general_uniform_indexing;
bool general_attribute_matrix_vector_indexing;
bool general_varying_indexing;
bool general_sampler_indexing;
bool general_variable_indexing;
bool general_constant_matrix_vector_indexing;
} glslang_limits_t;
/* TBuiltInResource counterpart */
typedef struct glslang_resource_s {
int max_lights;
int max_clip_planes;
int max_texture_units;
int max_texture_coords;
int max_vertex_attribs;
int max_vertex_uniform_components;
int max_varying_floats;
int max_vertex_texture_image_units;
int max_combined_texture_image_units;
int max_texture_image_units;
int max_fragment_uniform_components;
int max_draw_buffers;
int max_vertex_uniform_vectors;
int max_varying_vectors;
int max_fragment_uniform_vectors;
int max_vertex_output_vectors;
int max_fragment_input_vectors;
int min_program_texel_offset;
int max_program_texel_offset;
int max_clip_distances;
int max_compute_work_group_count_x;
int max_compute_work_group_count_y;
int max_compute_work_group_count_z;
int max_compute_work_group_size_x;
int max_compute_work_group_size_y;
int max_compute_work_group_size_z;
int max_compute_uniform_components;
int max_compute_texture_image_units;
int max_compute_image_uniforms;
int max_compute_atomic_counters;
int max_compute_atomic_counter_buffers;
int max_varying_components;
int max_vertex_output_components;
int max_geometry_input_components;
int max_geometry_output_components;
int max_fragment_input_components;
int max_image_units;
int max_combined_image_units_and_fragment_outputs;
int max_combined_shader_output_resources;
int max_image_samples;
int max_vertex_image_uniforms;
int max_tess_control_image_uniforms;
int max_tess_evaluation_image_uniforms;
int max_geometry_image_uniforms;
int max_fragment_image_uniforms;
int max_combined_image_uniforms;
int max_geometry_texture_image_units;
int max_geometry_output_vertices;
int max_geometry_total_output_components;
int max_geometry_uniform_components;
int max_geometry_varying_components;
int max_tess_control_input_components;
int max_tess_control_output_components;
int max_tess_control_texture_image_units;
int max_tess_control_uniform_components;
int max_tess_control_total_output_components;
int max_tess_evaluation_input_components;
int max_tess_evaluation_output_components;
int max_tess_evaluation_texture_image_units;
int max_tess_evaluation_uniform_components;
int max_tess_patch_components;
int max_patch_vertices;
int max_tess_gen_level;
int max_viewports;
int max_vertex_atomic_counters;
int max_tess_control_atomic_counters;
int max_tess_evaluation_atomic_counters;
int max_geometry_atomic_counters;
int max_fragment_atomic_counters;
int max_combined_atomic_counters;
int max_atomic_counter_bindings;
int max_vertex_atomic_counter_buffers;
int max_tess_control_atomic_counter_buffers;
int max_tess_evaluation_atomic_counter_buffers;
int max_geometry_atomic_counter_buffers;
int max_fragment_atomic_counter_buffers;
int max_combined_atomic_counter_buffers;
int max_atomic_counter_buffer_size;
int max_transform_feedback_buffers;
int max_transform_feedback_interleaved_components;
int max_cull_distances;
int max_combined_clip_and_cull_distances;
int max_samples;
int max_mesh_output_vertices_nv;
int max_mesh_output_primitives_nv;
int max_mesh_work_group_size_x_nv;
int max_mesh_work_group_size_y_nv;
int max_mesh_work_group_size_z_nv;
int max_task_work_group_size_x_nv;
int max_task_work_group_size_y_nv;
int max_task_work_group_size_z_nv;
int max_mesh_view_count_nv;
int maxDualSourceDrawBuffersEXT;
glslang_limits_t limits;
} glslang_resource_t;
typedef struct glslang_input_s {
glslang_source_t language;
glslang_stage_t stage;
glslang_client_t client;
glslang_target_client_version_t client_version;
glslang_target_language_t target_language;
glslang_target_language_version_t target_language_version;
/** Shader source code */
const char* code;
int default_version;
glslang_profile_t default_profile;
int force_default_version_and_profile;
int forward_compatible;
glslang_messages_t messages;
const glslang_resource_t* resource;
} glslang_input_t;
/* Inclusion result structure allocated by C include_local/include_system callbacks */
typedef struct glsl_include_result_s {
/* Header file name or NULL if inclusion failed */
const char* header_name;
/* Header contents or NULL */
const char* header_data;
size_t header_length;
} glsl_include_result_t;
/* Callback for local file inclusion */
typedef glsl_include_result_t* (*glsl_include_local_func)(void* ctx, const char* header_name, const char* includer_name,
size_t include_depth);
/* Callback for system file inclusion */
typedef glsl_include_result_t* (*glsl_include_system_func)(void* ctx, const char* header_name,
const char* includer_name, size_t include_depth);
/* Callback for include result destruction */
typedef int (*glsl_free_include_result_func)(void* ctx, glsl_include_result_t* result);
/* Collection of callbacks for GLSL preprocessor */
typedef struct glsl_include_callbacks_s {
glsl_include_system_func include_system;
glsl_include_local_func include_local;
glsl_free_include_result_func free_include_result;
} glsl_include_callbacks_t;
#ifdef __cplusplus
extern "C" {
#endif
#ifdef GLSLANG_IS_SHARED_LIBRARY
#ifdef _WIN32
#ifdef GLSLANG_EXPORTING
#define GLSLANG_EXPORT __declspec(dllexport)
#else
#define GLSLANG_EXPORT __declspec(dllimport)
#endif
#elif __GNUC__ >= 4
#define GLSLANG_EXPORT __attribute__((visibility("default")))
#endif
#endif // GLSLANG_IS_SHARED_LIBRARY
#ifndef GLSLANG_EXPORT
#define GLSLANG_EXPORT
#endif
GLSLANG_EXPORT int glslang_initialize_process();
GLSLANG_EXPORT void glslang_finalize_process();
GLSLANG_EXPORT glslang_shader_t* glslang_shader_create(const glslang_input_t* input);
GLSLANG_EXPORT void glslang_shader_delete(glslang_shader_t* shader);
GLSLANG_EXPORT void glslang_shader_shift_binding(glslang_shader_t* shader, glslang_resource_type_t res, unsigned int base);
GLSLANG_EXPORT void glslang_shader_shift_binding_for_set(glslang_shader_t* shader, glslang_resource_type_t res, unsigned int base, unsigned int set);
GLSLANG_EXPORT void glslang_shader_set_options(glslang_shader_t* shader, int options); // glslang_shader_options_t
GLSLANG_EXPORT void glslang_shader_set_glsl_version(glslang_shader_t* shader, int version);
GLSLANG_EXPORT int glslang_shader_preprocess(glslang_shader_t* shader, const glslang_input_t* input);
GLSLANG_EXPORT int glslang_shader_parse(glslang_shader_t* shader, const glslang_input_t* input);
GLSLANG_EXPORT const char* glslang_shader_get_preprocessed_code(glslang_shader_t* shader);
GLSLANG_EXPORT const char* glslang_shader_get_info_log(glslang_shader_t* shader);
GLSLANG_EXPORT const char* glslang_shader_get_info_debug_log(glslang_shader_t* shader);
GLSLANG_EXPORT glslang_program_t* glslang_program_create();
GLSLANG_EXPORT void glslang_program_delete(glslang_program_t* program);
GLSLANG_EXPORT void glslang_program_add_shader(glslang_program_t* program, glslang_shader_t* shader);
GLSLANG_EXPORT int glslang_program_link(glslang_program_t* program, int messages); // glslang_messages_t
GLSLANG_EXPORT int glslang_program_map_io(glslang_program_t* program);
GLSLANG_EXPORT void glslang_program_SPIRV_generate(glslang_program_t* program, glslang_stage_t stage);
GLSLANG_EXPORT size_t glslang_program_SPIRV_get_size(glslang_program_t* program);
GLSLANG_EXPORT void glslang_program_SPIRV_get(glslang_program_t* program, unsigned int*);
GLSLANG_EXPORT unsigned int* glslang_program_SPIRV_get_ptr(glslang_program_t* program);
GLSLANG_EXPORT const char* glslang_program_SPIRV_get_messages(glslang_program_t* program);
GLSLANG_EXPORT const char* glslang_program_get_info_log(glslang_program_t* program);
GLSLANG_EXPORT const char* glslang_program_get_info_debug_log(glslang_program_t* program);
#ifdef __cplusplus
}
#endif
#endif /* GLSLANG_C_IFACE_H_INCLUDED */
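// Illustrative sketch (not part of the original header): a minimal
// GLSL-to-SPIR-V flow through the C interface declared above, written as C++
// only to match the other sketches in this commit. The function name, the
// shader source argument and the 'resource' pointer are assumptions of the
// sketch, and cleanup on the failure paths is omitted for brevity.
inline bool compileVertexShaderSketch(const char* glslSource,
                                      const glslang_resource_t* resource)
{
    glslang_input_t input = {};
    input.language                = GLSLANG_SOURCE_GLSL;
    input.stage                   = GLSLANG_STAGE_VERTEX;
    input.client                  = GLSLANG_CLIENT_VULKAN;
    input.client_version          = GLSLANG_TARGET_VULKAN_1_2;
    input.target_language         = GLSLANG_TARGET_SPV;
    input.target_language_version = GLSLANG_TARGET_SPV_1_5;
    input.code                    = glslSource;
    input.default_version         = 450;
    input.default_profile         = GLSLANG_NO_PROFILE;
    input.messages                = GLSLANG_MSG_DEFAULT_BIT;
    input.resource                = resource;

    glslang_initialize_process();
    glslang_shader_t* shader = glslang_shader_create(&input);
    if (!glslang_shader_preprocess(shader, &input)) return false;
    if (!glslang_shader_parse(shader, &input))      return false;

    glslang_program_t* program = glslang_program_create();
    glslang_program_add_shader(program, shader);
    if (!glslang_program_link(program, GLSLANG_MSG_DEFAULT_BIT)) return false;

    glslang_program_SPIRV_generate(program, GLSLANG_STAGE_VERTEX);
    size_t wordCount = glslang_program_SPIRV_get_size(program);  // size in 32-bit words
    (void)wordCount;  // glslang_program_SPIRV_get(program, dest) would copy the module out

    glslang_program_delete(program);
    glslang_shader_delete(shader);
    glslang_finalize_process();
    return true;
}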


@ -0,0 +1,211 @@
/**
This code is based on the glslang_c_interface implementation by Viktor Latypov
**/
/**
BSD 2-Clause License
Copyright (c) 2019, Viktor Latypov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#ifndef C_SHADER_TYPES_H_INCLUDED
#define C_SHADER_TYPES_H_INCLUDED
#define LAST_ELEMENT_MARKER(x) x
/* EShLanguage counterpart */
typedef enum {
GLSLANG_STAGE_VERTEX,
GLSLANG_STAGE_TESSCONTROL,
GLSLANG_STAGE_TESSEVALUATION,
GLSLANG_STAGE_GEOMETRY,
GLSLANG_STAGE_FRAGMENT,
GLSLANG_STAGE_COMPUTE,
GLSLANG_STAGE_RAYGEN_NV,
GLSLANG_STAGE_INTERSECT_NV,
GLSLANG_STAGE_ANYHIT_NV,
GLSLANG_STAGE_CLOSESTHIT_NV,
GLSLANG_STAGE_MISS_NV,
GLSLANG_STAGE_CALLABLE_NV,
GLSLANG_STAGE_TASK_NV,
GLSLANG_STAGE_MESH_NV,
LAST_ELEMENT_MARKER(GLSLANG_STAGE_COUNT),
} glslang_stage_t; // would be better as stage, but this is ancient now
/* EShLanguageMask counterpart */
typedef enum {
GLSLANG_STAGE_VERTEX_MASK = (1 << GLSLANG_STAGE_VERTEX),
GLSLANG_STAGE_TESSCONTROL_MASK = (1 << GLSLANG_STAGE_TESSCONTROL),
GLSLANG_STAGE_TESSEVALUATION_MASK = (1 << GLSLANG_STAGE_TESSEVALUATION),
GLSLANG_STAGE_GEOMETRY_MASK = (1 << GLSLANG_STAGE_GEOMETRY),
GLSLANG_STAGE_FRAGMENT_MASK = (1 << GLSLANG_STAGE_FRAGMENT),
GLSLANG_STAGE_COMPUTE_MASK = (1 << GLSLANG_STAGE_COMPUTE),
GLSLANG_STAGE_RAYGEN_NV_MASK = (1 << GLSLANG_STAGE_RAYGEN_NV),
GLSLANG_STAGE_INTERSECT_NV_MASK = (1 << GLSLANG_STAGE_INTERSECT_NV),
GLSLANG_STAGE_ANYHIT_NV_MASK = (1 << GLSLANG_STAGE_ANYHIT_NV),
GLSLANG_STAGE_CLOSESTHIT_NV_MASK = (1 << GLSLANG_STAGE_CLOSESTHIT_NV),
GLSLANG_STAGE_MISS_NV_MASK = (1 << GLSLANG_STAGE_MISS_NV),
GLSLANG_STAGE_CALLABLE_NV_MASK = (1 << GLSLANG_STAGE_CALLABLE_NV),
GLSLANG_STAGE_TASK_NV_MASK = (1 << GLSLANG_STAGE_TASK_NV),
GLSLANG_STAGE_MESH_NV_MASK = (1 << GLSLANG_STAGE_MESH_NV),
LAST_ELEMENT_MARKER(GLSLANG_STAGE_MASK_COUNT),
} glslang_stage_mask_t;
/* EShSource counterpart */
typedef enum {
GLSLANG_SOURCE_NONE,
GLSLANG_SOURCE_GLSL,
GLSLANG_SOURCE_HLSL,
LAST_ELEMENT_MARKER(GLSLANG_SOURCE_COUNT),
} glslang_source_t;
/* EShClient counterpart */
typedef enum {
GLSLANG_CLIENT_NONE,
GLSLANG_CLIENT_VULKAN,
GLSLANG_CLIENT_OPENGL,
LAST_ELEMENT_MARKER(GLSLANG_CLIENT_COUNT),
} glslang_client_t;
/* EShTargetLanguage counterpart */
typedef enum {
GLSLANG_TARGET_NONE,
GLSLANG_TARGET_SPV,
LAST_ELEMENT_MARKER(GLSLANG_TARGET_COUNT),
} glslang_target_language_t;
/* SH_TARGET_ClientVersion counterpart */
typedef enum {
GLSLANG_TARGET_VULKAN_1_0 = (1 << 22),
GLSLANG_TARGET_VULKAN_1_1 = (1 << 22) | (1 << 12),
GLSLANG_TARGET_VULKAN_1_2 = (1 << 22) | (2 << 12),
GLSLANG_TARGET_VULKAN_1_3 = (1 << 22) | (3 << 12),
GLSLANG_TARGET_OPENGL_450 = 450,
LAST_ELEMENT_MARKER(GLSLANG_TARGET_CLIENT_VERSION_COUNT = 5),
} glslang_target_client_version_t;
/* SH_TARGET_LanguageVersion counterpart */
typedef enum {
GLSLANG_TARGET_SPV_1_0 = (1 << 16),
GLSLANG_TARGET_SPV_1_1 = (1 << 16) | (1 << 8),
GLSLANG_TARGET_SPV_1_2 = (1 << 16) | (2 << 8),
GLSLANG_TARGET_SPV_1_3 = (1 << 16) | (3 << 8),
GLSLANG_TARGET_SPV_1_4 = (1 << 16) | (4 << 8),
GLSLANG_TARGET_SPV_1_5 = (1 << 16) | (5 << 8),
GLSLANG_TARGET_SPV_1_6 = (1 << 16) | (6 << 8),
LAST_ELEMENT_MARKER(GLSLANG_TARGET_LANGUAGE_VERSION_COUNT = 7),
} glslang_target_language_version_t;
/* EShExecutable counterpart */
typedef enum { GLSLANG_EX_VERTEX_FRAGMENT, GLSLANG_EX_FRAGMENT } glslang_executable_t;
// EShOptimizationLevel counterpart
// This enum is not used in the current C interface, but could be added at a later date.
// GLSLANG_OPT_NONE is the current default.
typedef enum {
GLSLANG_OPT_NO_GENERATION,
GLSLANG_OPT_NONE,
GLSLANG_OPT_SIMPLE,
GLSLANG_OPT_FULL,
LAST_ELEMENT_MARKER(GLSLANG_OPT_LEVEL_COUNT),
} glslang_optimization_level_t;
/* EShTextureSamplerTransformMode counterpart */
typedef enum {
GLSLANG_TEX_SAMP_TRANS_KEEP,
GLSLANG_TEX_SAMP_TRANS_UPGRADE_TEXTURE_REMOVE_SAMPLER,
LAST_ELEMENT_MARKER(GLSLANG_TEX_SAMP_TRANS_COUNT),
} glslang_texture_sampler_transform_mode_t;
/* EShMessages counterpart */
typedef enum {
GLSLANG_MSG_DEFAULT_BIT = 0,
GLSLANG_MSG_RELAXED_ERRORS_BIT = (1 << 0),
GLSLANG_MSG_SUPPRESS_WARNINGS_BIT = (1 << 1),
GLSLANG_MSG_AST_BIT = (1 << 2),
GLSLANG_MSG_SPV_RULES_BIT = (1 << 3),
GLSLANG_MSG_VULKAN_RULES_BIT = (1 << 4),
GLSLANG_MSG_ONLY_PREPROCESSOR_BIT = (1 << 5),
GLSLANG_MSG_READ_HLSL_BIT = (1 << 6),
GLSLANG_MSG_CASCADING_ERRORS_BIT = (1 << 7),
GLSLANG_MSG_KEEP_UNCALLED_BIT = (1 << 8),
GLSLANG_MSG_HLSL_OFFSETS_BIT = (1 << 9),
GLSLANG_MSG_DEBUG_INFO_BIT = (1 << 10),
GLSLANG_MSG_HLSL_ENABLE_16BIT_TYPES_BIT = (1 << 11),
GLSLANG_MSG_HLSL_LEGALIZATION_BIT = (1 << 12),
GLSLANG_MSG_HLSL_DX9_COMPATIBLE_BIT = (1 << 13),
GLSLANG_MSG_BUILTIN_SYMBOL_TABLE_BIT = (1 << 14),
GLSLANG_MSG_ENHANCED = (1 << 15),
LAST_ELEMENT_MARKER(GLSLANG_MSG_COUNT),
} glslang_messages_t;
/* EShReflectionOptions counterpart */
typedef enum {
GLSLANG_REFLECTION_DEFAULT_BIT = 0,
GLSLANG_REFLECTION_STRICT_ARRAY_SUFFIX_BIT = (1 << 0),
GLSLANG_REFLECTION_BASIC_ARRAY_SUFFIX_BIT = (1 << 1),
GLSLANG_REFLECTION_INTERMEDIATE_IOO_BIT = (1 << 2),
GLSLANG_REFLECTION_SEPARATE_BUFFERS_BIT = (1 << 3),
GLSLANG_REFLECTION_ALL_BLOCK_VARIABLES_BIT = (1 << 4),
GLSLANG_REFLECTION_UNWRAP_IO_BLOCKS_BIT = (1 << 5),
GLSLANG_REFLECTION_ALL_IO_VARIABLES_BIT = (1 << 6),
GLSLANG_REFLECTION_SHARED_STD140_SSBO_BIT = (1 << 7),
GLSLANG_REFLECTION_SHARED_STD140_UBO_BIT = (1 << 8),
LAST_ELEMENT_MARKER(GLSLANG_REFLECTION_COUNT),
} glslang_reflection_options_t;
/* EProfile counterpart (from Versions.h) */
typedef enum {
GLSLANG_BAD_PROFILE = 0,
GLSLANG_NO_PROFILE = (1 << 0),
GLSLANG_CORE_PROFILE = (1 << 1),
GLSLANG_COMPATIBILITY_PROFILE = (1 << 2),
GLSLANG_ES_PROFILE = (1 << 3),
LAST_ELEMENT_MARKER(GLSLANG_PROFILE_COUNT),
} glslang_profile_t;
/* Shader options */
typedef enum {
GLSLANG_SHADER_DEFAULT_BIT = 0,
GLSLANG_SHADER_AUTO_MAP_BINDINGS = (1 << 0),
GLSLANG_SHADER_AUTO_MAP_LOCATIONS = (1 << 1),
GLSLANG_SHADER_VULKAN_RULES_RELAXED = (1 << 2),
LAST_ELEMENT_MARKER(GLSLANG_SHADER_COUNT),
} glslang_shader_options_t;
/* TResourceType counterpart */
typedef enum {
GLSLANG_RESOURCE_TYPE_SAMPLER,
GLSLANG_RESOURCE_TYPE_TEXTURE,
GLSLANG_RESOURCE_TYPE_IMAGE,
GLSLANG_RESOURCE_TYPE_UBO,
GLSLANG_RESOURCE_TYPE_SSBO,
GLSLANG_RESOURCE_TYPE_UAV,
LAST_ELEMENT_MARKER(GLSLANG_RESOURCE_TYPE_COUNT),
} glslang_resource_type_t;
#undef LAST_ELEMENT_MARKER
#endif

File diff suppressed because it is too large


@ -0,0 +1,112 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013-2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _INITIALIZE_INCLUDED_
#define _INITIALIZE_INCLUDED_
#include "../Include/ResourceLimits.h"
#include "../Include/Common.h"
#include "../Include/ShHandle.h"
#include "SymbolTable.h"
#include "Versions.h"
namespace glslang {
//
// This is made to hold parseable strings for almost all the built-in
// functions and variables for one specific combination of version
// and profile. (Some still need to be added programmatically.)
// This is a base class for language-specific derivations, which
// can be used for language independent builtins.
//
// The strings are organized by
// commonBuiltins: intersection of all stages' built-ins, processed just once
// stageBuiltins[]: anything a stage needs that's not in commonBuiltins
//
class TBuiltInParseables {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TBuiltInParseables();
virtual ~TBuiltInParseables();
virtual void initialize(int version, EProfile, const SpvVersion& spvVersion) = 0;
virtual void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage) = 0;
virtual const TString& getCommonString() const { return commonBuiltins; }
virtual const TString& getStageString(EShLanguage language) const { return stageBuiltins[language]; }
virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable) = 0;
virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources) = 0;
protected:
TString commonBuiltins;
TString stageBuiltins[EShLangCount];
};
//
// This is a GLSL specific derivation of TBuiltInParseables. To present a stable
// interface and match other similar code, it is called TBuiltIns, rather
// than TBuiltInParseablesGlsl.
//
class TBuiltIns : public TBuiltInParseables {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TBuiltIns();
virtual ~TBuiltIns();
void initialize(int version, EProfile, const SpvVersion& spvVersion);
void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage);
void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable);
void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources);
protected:
void addTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion);
void relateTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage, TSymbolTable&);
void add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion);
void addSubpassSampling(TSampler, const TString& typeName, int version, EProfile profile);
void addQueryFunctions(TSampler, const TString& typeName, int version, EProfile profile);
void addImageFunctions(TSampler, const TString& typeName, int version, EProfile profile);
void addSamplingFunctions(TSampler, const TString& typeName, int version, EProfile profile);
void addGatherFunctions(TSampler, const TString& typeName, int version, EProfile profile);
// Helpers for making textual representations of the permutations
// of texturing/imaging functions.
const char* postfixes[5];
const char* prefixes[EbtNumTypes];
int dimMap[EsdNumDims];
};
} // end namespace glslang
#endif // _INITIALIZE_INCLUDED_
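// Illustrative sketch, not part of the original header: a minimal language-specific
// derivation showing how initialize() is meant to fill commonBuiltins (shared by all
// stages, processed once) and stageBuiltins[] (per-stage extras), as described in the
// comments above. The class name and the declaration strings are hypothetical, and
// identifyBuiltIns() is stubbed out; an active glslang pool allocator is assumed, as
// in normal use.
namespace glslang {
class TBuiltInParseablesSketch : public TBuiltInParseables {
public:
    void initialize(int /*version*/, EProfile, const SpvVersion&) override
    {
        commonBuiltins.append("float saturate(float x);");               // every stage, added once
        stageBuiltins[EShLangFragment].append("vec4 fragOnlyHelper();");  // fragment stage only
    }
    void initialize(const TBuiltInResource&, int version, EProfile profile,
                    const SpvVersion& spvVersion, EShLanguage) override
    {
        initialize(version, profile, spvVersion);   // resource-aware overload reuses the simple one here
    }
    void identifyBuiltIns(int, EProfile, const SpvVersion&, EShLanguage, TSymbolTable&) override { }
    void identifyBuiltIns(int, EProfile, const SpvVersion&, EShLanguage, TSymbolTable&,
                          const TBuiltInResource&) override { }
};
} // end namespace glslang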

View File

@ -0,0 +1,168 @@
//
// Copyright (C) 2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include "../Include/Common.h"
#include "reflection.h"
#include "localintermediate.h"
#include "gl_types.h"
#include <list>
#include <unordered_set>
namespace glslang {
//
// The traverser: mostly pass through, except
// - processing function-call nodes to push live functions onto the stack of functions to process
// - processing selection nodes to trim semantically dead code
//
// This is in the glslang namespace directly so it can be a friend of TReflection.
// This can be derived from to implement reflection database traversers or
// binding mappers: anything that wants to traverse the live subset of the tree.
//
class TLiveTraverser : public TIntermTraverser {
public:
TLiveTraverser(const TIntermediate& i, bool traverseAll = false,
bool preVisit = true, bool inVisit = false, bool postVisit = false) :
TIntermTraverser(preVisit, inVisit, postVisit),
intermediate(i), traverseAll(traverseAll)
{ }
//
// Given a function name, find its subroot in the tree, and push it onto the stack of
// functions left to process.
//
void pushFunction(const TString& name)
{
TIntermSequence& globals = intermediate.getTreeRoot()->getAsAggregate()->getSequence();
for (unsigned int f = 0; f < globals.size(); ++f) {
TIntermAggregate* candidate = globals[f]->getAsAggregate();
if (candidate && candidate->getOp() == EOpFunction && candidate->getName() == name) {
destinations.push_back(candidate);
break;
}
}
}
void pushGlobalReference(const TString& name)
{
TIntermSequence& globals = intermediate.getTreeRoot()->getAsAggregate()->getSequence();
for (unsigned int f = 0; f < globals.size(); ++f) {
TIntermAggregate* candidate = globals[f]->getAsAggregate();
if (candidate && candidate->getOp() == EOpSequence &&
candidate->getSequence().size() == 1 &&
candidate->getSequence()[0]->getAsBinaryNode()) {
TIntermBinary* binary = candidate->getSequence()[0]->getAsBinaryNode();
TIntermSymbol* symbol = binary->getLeft()->getAsSymbolNode();
if (symbol && symbol->getQualifier().storage == EvqGlobal &&
symbol->getName() == name) {
destinations.push_back(candidate);
break;
}
}
}
}
typedef std::list<TIntermAggregate*> TDestinationStack;
TDestinationStack destinations;
protected:
// To catch which function calls are not dead, and hence which functions must be visited.
virtual bool visitAggregate(TVisit, TIntermAggregate* node)
{
if (!traverseAll)
if (node->getOp() == EOpFunctionCall)
addFunctionCall(node);
return true; // traverse this subtree
}
// To prune semantically dead paths.
virtual bool visitSelection(TVisit /* visit */, TIntermSelection* node)
{
if (traverseAll)
return true; // traverse all code
TIntermConstantUnion* constant = node->getCondition()->getAsConstantUnion();
if (constant) {
// cull the path that is dead
if (constant->getConstArray()[0].getBConst() == true && node->getTrueBlock())
node->getTrueBlock()->traverse(this);
if (constant->getConstArray()[0].getBConst() == false && node->getFalseBlock())
node->getFalseBlock()->traverse(this);
return false; // don't traverse any more, we did it all above
} else
return true; // traverse the whole subtree
}
// Track live functions as well as uniforms, so that we don't visit dead functions
// and only visit each function once.
void addFunctionCall(TIntermAggregate* call)
{
// just use the map to ensure we process each function at most once
if (liveFunctions.find(call->getName()) == liveFunctions.end()) {
liveFunctions.insert(call->getName());
pushFunction(call->getName());
}
}
void addGlobalReference(const TString& name)
{
// just use the map to ensure we process each global at most once
if (liveGlobals.find(name) == liveGlobals.end()) {
liveGlobals.insert(name);
pushGlobalReference(name);
}
}
const TIntermediate& intermediate;
typedef std::unordered_set<TString> TLiveFunctions;
TLiveFunctions liveFunctions;
typedef std::unordered_set<TString> TLiveGlobals;
TLiveGlobals liveGlobals;
bool traverseAll;
private:
// prevent copy & copy construct
TLiveTraverser(TLiveTraverser&);
TLiveTraverser& operator=(TLiveTraverser&);
};
} // namespace glslang
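// Illustrative sketch, not part of the original header: the traverser is driven as a
// worklist. pushFunction() seeds 'destinations' with the entry point's subtree, and
// traversing each popped subtree lets visitAggregate() push further live calls.
// Assumes a compiled intermediate with a tree root and an active pool allocator;
// "main(" is the usual mangled name of a parameterless entry point, and the function
// name below is hypothetical.
namespace glslang {
inline void walkLiveFunctions(const TIntermediate& intermediate)
{
    TLiveTraverser live(intermediate);          // traverseAll == false: dead branches are pruned
    live.pushFunction(TString("main("));
    while (! live.destinations.empty()) {
        TIntermAggregate* subtree = live.destinations.back();
        live.destinations.pop_back();
        subtree->traverse(&live);               // may push more live functions onto destinations
    }
}
} // end namespace glslang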

View File

@ -0,0 +1,588 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// This header defines a two-level parse-helper hierarchy, derived from
// TParseVersions:
// - TParseContextBase: sharable across multiple parsers
// - TParseContext: GLSL specific helper
//
#ifndef _PARSER_HELPER_INCLUDED_
#define _PARSER_HELPER_INCLUDED_
#include <cstdarg>
#include <functional>
#include "parseVersions.h"
#include "../Include/ShHandle.h"
#include "SymbolTable.h"
#include "localintermediate.h"
#include "Scan.h"
#include "attribute.h"
namespace glslang {
struct TPragma {
TPragma(bool o, bool d) : optimize(o), debug(d) { }
bool optimize;
bool debug;
TPragmaTable pragmaTable;
};
class TScanContext;
class TPpContext;
typedef std::set<long long> TIdSetType;
typedef std::map<const TTypeList*, std::map<size_t, const TTypeList*>> TStructRecord;
//
// Sharable code (as well as what's in TParseVersions) across
// parse helpers.
//
class TParseContextBase : public TParseVersions {
public:
TParseContextBase(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins, int version,
EProfile profile, const SpvVersion& spvVersion, EShLanguage language,
TInfoSink& infoSink, bool forwardCompatible, EShMessages messages,
const TString* entryPoint = nullptr)
: TParseVersions(interm, version, profile, spvVersion, language, infoSink, forwardCompatible, messages),
scopeMangler("::"),
symbolTable(symbolTable),
statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), blockNestingLevel(0), controlFlowNestingLevel(0),
currentFunctionType(nullptr),
postEntryPointReturn(false),
contextPragma(true, false),
beginInvocationInterlockCount(0), endInvocationInterlockCount(0),
parsingBuiltins(parsingBuiltins), scanContext(nullptr), ppContext(nullptr),
limits(resources.limits),
globalUniformBlock(nullptr),
globalUniformBinding(TQualifier::layoutBindingEnd),
globalUniformSet(TQualifier::layoutSetEnd),
atomicCounterBlockSet(TQualifier::layoutSetEnd)
{
if (entryPoint != nullptr)
sourceEntryPointName = *entryPoint;
}
virtual ~TParseContextBase() { }
#if !defined(GLSLANG_WEB) || defined(GLSLANG_WEB_DEVEL)
virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
#endif
virtual void setLimits(const TBuiltInResource&) = 0;
void checkIndex(const TSourceLoc&, const TType&, int& index);
EShLanguage getLanguage() const { return language; }
void setScanContext(TScanContext* c) { scanContext = c; }
TScanContext* getScanContext() const { return scanContext; }
void setPpContext(TPpContext* c) { ppContext = c; }
TPpContext* getPpContext() const { return ppContext; }
virtual void setLineCallback(const std::function<void(int, int, bool, int, const char*)>& func) { lineCallback = func; }
virtual void setExtensionCallback(const std::function<void(int, const char*, const char*)>& func) { extensionCallback = func; }
virtual void setVersionCallback(const std::function<void(int, int, const char*)>& func) { versionCallback = func; }
virtual void setPragmaCallback(const std::function<void(int, const TVector<TString>&)>& func) { pragmaCallback = func; }
virtual void setErrorCallback(const std::function<void(int, const char*)>& func) { errorCallback = func; }
virtual void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) = 0;
virtual bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) = 0;
virtual bool lineDirectiveShouldSetNextLine() const = 0;
virtual void handlePragma(const TSourceLoc&, const TVector<TString>&) = 0;
virtual bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) = 0;
virtual void notifyVersion(int line, int version, const char* type_string)
{
if (versionCallback)
versionCallback(line, version, type_string);
}
virtual void notifyErrorDirective(int line, const char* error_message)
{
if (errorCallback)
errorCallback(line, error_message);
}
virtual void notifyLineDirective(int curLineNo, int newLineNo, bool hasSource, int sourceNum, const char* sourceName)
{
if (lineCallback)
lineCallback(curLineNo, newLineNo, hasSource, sourceNum, sourceName);
}
virtual void notifyExtensionDirective(int line, const char* extension, const char* behavior)
{
if (extensionCallback)
extensionCallback(line, extension, behavior);
}
// Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr);
// Manage global buffer (used for backing atomic counters in GLSL when using relaxed Vulkan semantics)
virtual void growAtomicCounterBlock(int binding, const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr);
// Potentially rename shader entry point function
void renameShaderFunction(TString*& name) const
{
// Replace the entry point name given in the shader with the real entry point name,
// if there is a substitution.
if (name != nullptr && *name == sourceEntryPointName && intermediate.getEntryPointName().size() > 0)
name = NewPoolTString(intermediate.getEntryPointName().c_str());
}
virtual bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*);
virtual void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*);
const char* const scopeMangler;
// Basic parsing state, easily accessible to the grammar
TSymbolTable& symbolTable; // symbol table that goes with the current language, version, and profile
int statementNestingLevel; // 0 if outside all flow control or compound statements
int loopNestingLevel; // 0 if outside all loops
int structNestingLevel; // 0 if outside structures
int blockNestingLevel; // 0 if outside blocks
int controlFlowNestingLevel; // 0 if outside all flow control
const TType* currentFunctionType; // the return type of the function that's currently being parsed
bool functionReturnsValue; // true if a non-void function has a return
// if inside a function, true if the function is the entry point and this is after a return statement
bool postEntryPointReturn;
// case, node, case, case, node, ...; ensure only one node between cases; stack of them for nesting
TList<TIntermSequence*> switchSequenceStack;
// the statementNestingLevel the current switch statement is at, which must match the level of its case statements
TList<int> switchLevel;
struct TPragma contextPragma;
int beginInvocationInterlockCount;
int endInvocationInterlockCount;
protected:
TParseContextBase(TParseContextBase&);
TParseContextBase& operator=(TParseContextBase&);
const bool parsingBuiltins; // true if parsing built-in symbols/functions
TVector<TSymbol*> linkageSymbols; // will be transferred to 'linkage', after all editing is done, order preserving
TScanContext* scanContext;
TPpContext* ppContext;
TBuiltInResource resources;
TLimits& limits;
TString sourceEntryPointName;
// These, if set, will be called when a line, pragma ... is preprocessed.
// They will be called with any parameters to the original directive.
std::function<void(int, int, bool, int, const char*)> lineCallback;
std::function<void(int, const TVector<TString>&)> pragmaCallback;
std::function<void(int, int, const char*)> versionCallback;
std::function<void(int, const char*, const char*)> extensionCallback;
std::function<void(int, const char*)> errorCallback;
// see implementation for detail
const TFunction* selectFunction(const TVector<const TFunction*>, const TFunction&,
std::function<bool(const TType&, const TType&, TOperator, int arg)>,
std::function<bool(const TType&, const TType&, const TType&)>,
/* output */ bool& tie);
virtual void parseSwizzleSelector(const TSourceLoc&, const TString&, int size,
TSwizzleSelectors<TVectorSelector>&);
// Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
TVariable* globalUniformBlock; // the actual block, inserted into the symbol table
unsigned int globalUniformBinding; // the block's binding number
unsigned int globalUniformSet; // the block's set number
int firstNewMember; // the index of the first member not yet inserted into the symbol table
// override this to set the language-specific name
virtual const char* getGlobalUniformBlockName() const { return ""; }
virtual void setUniformBlockDefaults(TType&) const { }
virtual void finalizeGlobalUniformBlockLayout(TVariable&) {}
// Manage the atomic counter block (used for atomic_uints with Vulkan-Relaxed)
TMap<int, TVariable*> atomicCounterBuffers;
unsigned int atomicCounterBlockSet;
TMap<int, int> atomicCounterBlockFirstNewMember;
// override this to set the language-specific name
virtual const char* getAtomicCounterBlockName() const { return ""; }
virtual void setAtomicCounterBlockDefaults(TType&) const {}
virtual void setInvariant(const TSourceLoc&, const char*) {}
virtual void finalizeAtomicCounterBlockLayout(TVariable&) {}
bool isAtomicCounterBlock(const TSymbol& symbol) {
const TVariable* var = symbol.getAsVariable();
if (!var)
return false;
const auto& at = atomicCounterBuffers.find(var->getType().getQualifier().layoutBinding);
return (at != atomicCounterBuffers.end() && (*at).second->getType() == var->getType());
}
virtual void outputMessage(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, TPrefixType prefix,
va_list args);
virtual void trackLinkage(TSymbol& symbol);
virtual void makeEditable(TSymbol*&);
virtual TVariable* getEditableVariable(const char* name);
virtual void finish();
};
//
// Manage the state for when to respect precision qualifiers and when to warn about
// the defaults being different than might be expected.
//
class TPrecisionManager {
public:
TPrecisionManager() : obey(false), warn(false), explicitIntDefault(false), explicitFloatDefault(false){ }
virtual ~TPrecisionManager() {}
void respectPrecisionQualifiers() { obey = true; }
bool respectingPrecisionQualifiers() const { return obey; }
bool shouldWarnAboutDefaults() const { return warn; }
void defaultWarningGiven() { warn = false; }
void warnAboutDefaults() { warn = true; }
void explicitIntDefaultSeen()
{
explicitIntDefault = true;
if (explicitFloatDefault)
warn = false;
}
void explicitFloatDefaultSeen()
{
explicitFloatDefault = true;
if (explicitIntDefault)
warn = false;
}
protected:
bool obey; // respect precision qualifiers
bool warn; // need to give a warning about the defaults
bool explicitIntDefault; // user set the default for int/uint
bool explicitFloatDefault; // user set the default for float
};
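// Illustrative sketch, not part of the original header: the default-precision warning
// is retired only once the user has given explicit defaults for both int and float.
// The function name is hypothetical.
inline void precisionWarningFlow()
{
    TPrecisionManager pm;
    pm.respectPrecisionQualifiers();
    pm.warnAboutDefaults();
    pm.explicitIntDefaultSeen();    // float default still implicit: shouldWarnAboutDefaults() stays true
    pm.explicitFloatDefaultSeen();  // both defaults explicit: shouldWarnAboutDefaults() is now false
}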
//
// GLSL-specific parse helper. Should have GLSL in the name, but that's
// too big of a change for comparing branches at the moment, and perhaps
// impacts downstream consumers as well.
//
class TParseContext : public TParseContextBase {
public:
TParseContext(TSymbolTable&, TIntermediate&, bool parsingBuiltins, int version, EProfile, const SpvVersion& spvVersion, EShLanguage, TInfoSink&,
bool forwardCompatible = false, EShMessages messages = EShMsgDefault,
const TString* entryPoint = nullptr);
virtual ~TParseContext();
bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); }
void setPrecisionDefaults();
void setLimits(const TBuiltInResource&) override;
bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) override;
void parserError(const char* s); // for bison's yyerror
virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr) override;
virtual void growAtomicCounterBlock(int binding, const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr) override;
void reservedErrorCheck(const TSourceLoc&, const TString&);
void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) override;
bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) override;
bool lineDirectiveShouldSetNextLine() const override;
bool builtInName(const TString&);
void handlePragma(const TSourceLoc&, const TVector<TString>&) override;
TIntermTyped* handleVariable(const TSourceLoc&, TSymbol* symbol, const TString* string);
TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
void handleIndexLimits(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
#ifndef GLSLANG_WEB
void makeEditable(TSymbol*&) override;
void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
#endif
bool isIoResizeArray(const TType&) const;
void fixIoArraySize(const TSourceLoc&, TType&);
void handleIoResizeArrayAccess(const TSourceLoc&, TIntermTyped* base);
void checkIoArraysConsistency(const TSourceLoc&, bool tailOnly = false);
int getIoArrayImplicitSize(const TQualifier&, TString* featureString = nullptr) const;
void checkIoArrayConsistency(const TSourceLoc&, int requiredSize, const char* feature, TType&, const TString&);
TIntermTyped* handleBinaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right);
TIntermTyped* handleUnaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* childNode);
TIntermTyped* handleDotDereference(const TSourceLoc&, TIntermTyped* base, const TString& field);
TIntermTyped* handleDotSwizzle(const TSourceLoc&, TIntermTyped* base, const TString& field);
void blockMemberExtensionCheck(const TSourceLoc&, const TIntermTyped* base, int member, const TString& memberName);
TFunction* handleFunctionDeclarator(const TSourceLoc&, TFunction& function, bool prototype);
TIntermAggregate* handleFunctionDefinition(const TSourceLoc&, TFunction&);
TIntermTyped* handleFunctionCall(const TSourceLoc&, TFunction*, TIntermNode*);
TIntermTyped* handleBuiltInFunctionCall(TSourceLoc, TIntermNode* arguments, const TFunction& function);
void computeBuiltinPrecisions(TIntermTyped&, const TFunction&);
TIntermNode* handleReturnValue(const TSourceLoc&, TIntermTyped*);
void checkLocation(const TSourceLoc&, TOperator);
TIntermTyped* handleLengthMethod(const TSourceLoc&, TFunction*, TIntermNode*);
void addInputArgumentConversions(const TFunction&, TIntermNode*&) const;
TIntermTyped* addOutputArgumentConversions(const TFunction&, TIntermAggregate&) const;
TIntermTyped* addAssign(const TSourceLoc&, TOperator op, TIntermTyped* left, TIntermTyped* right);
void builtInOpCheck(const TSourceLoc&, const TFunction&, TIntermOperator&);
void nonOpBuiltInCheck(const TSourceLoc&, const TFunction&, TIntermAggregate&);
void userFunctionCallCheck(const TSourceLoc&, TIntermAggregate&);
void samplerConstructorLocationCheck(const TSourceLoc&, const char* token, TIntermNode*);
TFunction* handleConstructorCall(const TSourceLoc&, const TPublicType&);
void handlePrecisionQualifier(const TSourceLoc&, TQualifier&, TPrecisionQualifier);
void checkPrecisionQualifier(const TSourceLoc&, TPrecisionQualifier);
void memorySemanticsCheck(const TSourceLoc&, const TFunction&, const TIntermOperator& callNode);
TIntermTyped* vkRelaxedRemapFunctionCall(const TSourceLoc&, TFunction*, TIntermNode*);
// returns true if the variable was remapped to something else
bool vkRelaxedRemapUniformVariable(const TSourceLoc&, TString&, const TPublicType&, TArraySizes*, TIntermTyped*, TType&);
void assignError(const TSourceLoc&, const char* op, TString left, TString right);
void unaryOpError(const TSourceLoc&, const char* op, TString operand);
void binaryOpError(const TSourceLoc&, const char* op, TString left, TString right);
void variableCheck(TIntermTyped*& nodePtr);
bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override;
void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override;
void constantValueCheck(TIntermTyped* node, const char* token);
void integerCheck(const TIntermTyped* node, const char* token);
void globalCheck(const TSourceLoc&, const char* token);
bool constructorError(const TSourceLoc&, TIntermNode*, TFunction&, TOperator, TType&);
bool constructorTextureSamplerError(const TSourceLoc&, const TFunction&);
void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&, const char *sizeType);
bool arrayQualifierError(const TSourceLoc&, const TQualifier&);
bool arrayError(const TSourceLoc&, const TType&);
void arraySizeRequiredCheck(const TSourceLoc&, const TArraySizes&);
void structArrayCheck(const TSourceLoc&, const TType& structure);
void arraySizesCheck(const TSourceLoc&, const TQualifier&, TArraySizes*, const TIntermTyped* initializer, bool lastMember);
void arrayOfArrayVersionCheck(const TSourceLoc&, const TArraySizes*);
bool voidErrorCheck(const TSourceLoc&, const TString&, TBasicType);
void boolCheck(const TSourceLoc&, const TIntermTyped*);
void boolCheck(const TSourceLoc&, const TPublicType&);
void samplerCheck(const TSourceLoc&, const TType&, const TString& identifier, TIntermTyped* initializer);
void atomicUintCheck(const TSourceLoc&, const TType&, const TString& identifier);
    void accStructCheck(const TSourceLoc& loc, const TType& type, const TString& identifier);
void transparentOpaqueCheck(const TSourceLoc&, const TType&, const TString& identifier);
void memberQualifierCheck(glslang::TPublicType&);
void globalQualifierFixCheck(const TSourceLoc&, TQualifier&, bool isMemberCheck = false);
void globalQualifierTypeCheck(const TSourceLoc&, const TQualifier&, const TPublicType&);
bool structQualifierErrorCheck(const TSourceLoc&, const TPublicType& pType);
void mergeQualifiers(const TSourceLoc&, TQualifier& dst, const TQualifier& src, bool force);
void setDefaultPrecision(const TSourceLoc&, TPublicType&, TPrecisionQualifier);
int computeSamplerTypeIndex(TSampler&);
TPrecisionQualifier getDefaultPrecision(TPublicType&);
void precisionQualifierCheck(const TSourceLoc&, TBasicType, TQualifier&);
void parameterTypeCheck(const TSourceLoc&, TStorageQualifier qualifier, const TType& type);
    bool containsFieldWithBasicType(const TType& type, TBasicType basicType);
TSymbol* redeclareBuiltinVariable(const TSourceLoc&, const TString&, const TQualifier&, const TShaderQualifiers&);
void redeclareBuiltinBlock(const TSourceLoc&, TTypeList& typeList, const TString& blockName, const TString* instanceName, TArraySizes* arraySizes);
void paramCheckFixStorage(const TSourceLoc&, const TStorageQualifier&, TType& type);
void paramCheckFix(const TSourceLoc&, const TQualifier&, TType& type);
void nestedBlockCheck(const TSourceLoc&);
void nestedStructCheck(const TSourceLoc&);
void arrayObjectCheck(const TSourceLoc&, const TType&, const char* op);
void opaqueCheck(const TSourceLoc&, const TType&, const char* op);
void referenceCheck(const TSourceLoc&, const TType&, const char* op);
void storage16BitAssignmentCheck(const TSourceLoc&, const TType&, const char* op);
void specializationCheck(const TSourceLoc&, const TType&, const char* op);
void structTypeCheck(const TSourceLoc&, TPublicType&);
void inductiveLoopCheck(const TSourceLoc&, TIntermNode* init, TIntermLoop* loop);
void arrayLimitCheck(const TSourceLoc&, const TString&, int size);
void limitCheck(const TSourceLoc&, int value, const char* limit, const char* feature);
void inductiveLoopBodyCheck(TIntermNode*, long long loopIndexId, TSymbolTable&);
void constantIndexExpressionCheck(TIntermNode*);
void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&);
void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&, const TIntermTyped*);
void mergeObjectLayoutQualifiers(TQualifier& dest, const TQualifier& src, bool inheritOnly);
void layoutObjectCheck(const TSourceLoc&, const TSymbol&);
void layoutMemberLocationArrayCheck(const TSourceLoc&, bool memberWithLocation, TArraySizes* arraySizes);
void layoutTypeCheck(const TSourceLoc&, const TType&);
void layoutQualifierCheck(const TSourceLoc&, const TQualifier&);
void checkNoShaderLayouts(const TSourceLoc&, const TShaderQualifiers&);
void fixOffset(const TSourceLoc&, TSymbol&);
const TFunction* findFunction(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
const TFunction* findFunctionExact(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
const TFunction* findFunction120(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
const TFunction* findFunction400(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
const TFunction* findFunctionExplicitTypes(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
void declareTypeDefaults(const TSourceLoc&, const TPublicType&);
TIntermNode* declareVariable(const TSourceLoc&, TString& identifier, const TPublicType&, TArraySizes* typeArray = 0, TIntermTyped* initializer = 0);
TIntermTyped* addConstructor(const TSourceLoc&, TIntermNode*, const TType&);
TIntermTyped* constructAggregate(TIntermNode*, const TType&, int, const TSourceLoc&);
TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset);
void inheritMemoryQualifiers(const TQualifier& from, TQualifier& to);
void declareBlock(const TSourceLoc&, TTypeList& typeList, const TString* instanceName = 0, TArraySizes* arraySizes = 0);
void blockStorageRemap(const TSourceLoc&, const TString*, TQualifier&);
void blockStageIoCheck(const TSourceLoc&, const TQualifier&);
void blockQualifierCheck(const TSourceLoc&, const TQualifier&, bool instanceName);
void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation);
void fixXfbOffsets(TQualifier&, TTypeList&);
void fixBlockUniformOffsets(TQualifier&, TTypeList&);
void fixBlockUniformLayoutMatrix(TQualifier&, TTypeList*, TTypeList*);
void fixBlockUniformLayoutPacking(TQualifier&, TTypeList*, TTypeList*);
void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier);
void addQualifierToExisting(const TSourceLoc&, TQualifier, TIdentifierList&);
void invariantCheck(const TSourceLoc&, const TQualifier&);
void updateStandaloneQualifierDefaults(const TSourceLoc&, const TPublicType&);
void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body);
const TTypeList* recordStructCopy(TStructRecord&, const TType*, const TType*);
#ifndef GLSLANG_WEB
TAttributeType attributeFromName(const TString& name) const;
TAttributes* makeAttributes(const TString& identifier) const;
TAttributes* makeAttributes(const TString& identifier, TIntermNode* node) const;
TAttributes* mergeAttributes(TAttributes*, TAttributes*) const;
// Determine selection control from attributes
void handleSelectionAttributes(const TAttributes& attributes, TIntermNode*);
void handleSwitchAttributes(const TAttributes& attributes, TIntermNode*);
// Determine loop control from attributes
void handleLoopAttributes(const TAttributes& attributes, TIntermNode*);
// Function attributes
void handleFunctionAttributes(const TSourceLoc&, const TAttributes&);
// GL_EXT_spirv_intrinsics
TSpirvRequirement* makeSpirvRequirement(const TSourceLoc& loc, const TString& name,
const TIntermAggregate* extensions, const TIntermAggregate* capabilities);
TSpirvRequirement* mergeSpirvRequirements(const TSourceLoc& loc, TSpirvRequirement* spirvReq1,
TSpirvRequirement* spirvReq2);
TSpirvTypeParameters* makeSpirvTypeParameters(const TSourceLoc& loc, const TIntermConstantUnion* constant);
TSpirvTypeParameters* mergeSpirvTypeParameters(TSpirvTypeParameters* spirvTypeParams1,
TSpirvTypeParameters* spirvTypeParams2);
TSpirvInstruction* makeSpirvInstruction(const TSourceLoc& loc, const TString& name, const TString& value);
TSpirvInstruction* makeSpirvInstruction(const TSourceLoc& loc, const TString& name, int value);
TSpirvInstruction* mergeSpirvInstruction(const TSourceLoc& loc, TSpirvInstruction* spirvInst1,
TSpirvInstruction* spirvInst2);
#endif
void checkAndResizeMeshViewDim(const TSourceLoc&, TType&, bool isBlockMember);
protected:
void nonInitConstCheck(const TSourceLoc&, TString& identifier, TType& type);
void inheritGlobalDefaults(TQualifier& dst) const;
TVariable* makeInternalVariable(const char* name, const TType&) const;
TVariable* declareNonArray(const TSourceLoc&, const TString& identifier, const TType&);
void declareArray(const TSourceLoc&, const TString& identifier, const TType&, TSymbol*&);
void checkRuntimeSizable(const TSourceLoc&, const TIntermTyped&);
bool isRuntimeLength(const TIntermTyped&) const;
TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable);
TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer);
#ifndef GLSLANG_WEB
void finish() override;
#endif
virtual const char* getGlobalUniformBlockName() const override;
virtual void finalizeGlobalUniformBlockLayout(TVariable&) override;
virtual void setUniformBlockDefaults(TType& block) const override;
virtual const char* getAtomicCounterBlockName() const override;
virtual void finalizeAtomicCounterBlockLayout(TVariable&) override;
virtual void setAtomicCounterBlockDefaults(TType& block) const override;
virtual void setInvariant(const TSourceLoc& loc, const char* builtin) override;
public:
//
// Generally, bison productions, the scanner, and the PP need read/write access to these; just give them direct access
//
// Current state of parsing
bool inMain; // if inside a function, true if the function is main
const TString* blockName;
TQualifier currentBlockQualifier;
TPrecisionQualifier defaultPrecision[EbtNumTypes];
TBuiltInResource resources;
TLimits& limits;
protected:
TParseContext(TParseContext&);
TParseContext& operator=(TParseContext&);
static const int maxSamplerIndex = EsdNumDims * (EbtNumTypes * (2 * 2 * 2 * 2 * 2)); // see computeSamplerTypeIndex()
TPrecisionQualifier defaultSamplerPrecision[maxSamplerIndex];
TPrecisionManager precisionManager;
TQualifier globalBufferDefaults;
TQualifier globalUniformDefaults;
TQualifier globalInputDefaults;
TQualifier globalOutputDefaults;
TQualifier globalSharedDefaults;
TString currentCaller; // name of last function body entered (not valid when at global scope)
#ifndef GLSLANG_WEB
int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point
bool anyIndexLimits;
TIdSetType inductiveLoopIds;
TVector<TIntermTyped*> needsIndexLimitationChecking;
TStructRecord matrixFixRecord;
TStructRecord packingFixRecord;
//
// Geometry shader input arrays:
// - array sizing is based on input primitive and/or explicit size
//
// Tessellation control output arrays:
// - array sizing is based on output layout(vertices=...) and/or explicit size
//
// Both:
// - array sizing is retroactive
// - built-in block redeclarations interact with this
//
// Design:
// - use a per-context "resize-list", a list of symbols whose array sizes
// can be fixed
//
// - the resize-list starts empty at beginning of user-shader compilation, it does
// not have built-ins in it
//
// - on built-in array use: copyUp() symbol and add it to the resize-list
//
// - on user array declaration: add it to the resize-list
//
// - on block redeclaration: copyUp() symbol and add it to the resize-list
// * note, that appropriately gives an error if redeclaring a block that
// was already used and hence already copied-up
//
// - on seeing a layout declaration that sizes the array, fix everything in the
// resize-list, giving errors for mismatch
//
// - on seeing an array size declaration, give errors on mismatch between it and previous
// array-sizing declarations
//
TVector<TSymbol*> ioArraySymbolResizeList;
#endif
};
} // end namespace glslang
#endif // _PARSER_HELPER_INCLUDED_
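// Illustrative sketch, not part of the original header: the notify*() hooks above
// invoke these std::function callbacks as #version and #extension directives are
// preprocessed. The function name and the logging are hypothetical.
#include <cstdio>
inline void attachDirectiveLogging(glslang::TParseContextBase& pc)
{
    pc.setVersionCallback([](int line, int version, const char* profile) {
        std::printf("line %d: #version %d %s\n", line, version, profile ? profile : "");
    });
    pc.setExtensionCallback([](int line, const char* extension, const char* behavior) {
        std::printf("line %d: #extension %s : %s\n", line, extension, behavior);
    });
}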

View File

@ -0,0 +1,41 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
namespace glslang {
void RemoveAllTreeNodes(TIntermNode*);
} // end namespace glslang

View File

@ -0,0 +1,276 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _GLSLANG_SCAN_INCLUDED_
#define _GLSLANG_SCAN_INCLUDED_
#include "Versions.h"
namespace glslang {
// Use a global end-of-input character, so no translation is needed across
// layers of encapsulation. Characters are all 8 bit, and positive, so there is
// no aliasing of character 255 onto -1, for example.
const int EndOfInput = -1;
//
// A character scanner that seamlessly, on read-only strings, reads across an
// array of strings without assuming null termination.
//
class TInputScanner {
public:
TInputScanner(int n, const char* const s[], size_t L[], const char* const* names = nullptr,
int b = 0, int f = 0, bool single = false) :
numSources(n),
// up to this point, common usage is "char*", but now we need positive 8-bit characters
sources(reinterpret_cast<const unsigned char* const *>(s)),
lengths(L), currentSource(0), currentChar(0), stringBias(b), finale(f), singleLogical(single),
endOfFileReached(false)
{
loc = new TSourceLoc[numSources];
for (int i = 0; i < numSources; ++i) {
loc[i].init(i - stringBias);
}
if (names != nullptr) {
for (int i = 0; i < numSources; ++i)
loc[i].name = names[i] != nullptr ? NewPoolTString(names[i]) : nullptr;
}
loc[currentSource].line = 1;
logicalSourceLoc.init(1);
logicalSourceLoc.name = loc[0].name;
}
virtual ~TInputScanner()
{
delete [] loc;
}
// retrieve the next character and advance one character
int get()
{
int ret = peek();
if (ret == EndOfInput)
return ret;
++loc[currentSource].column;
++logicalSourceLoc.column;
if (ret == '\n') {
++loc[currentSource].line;
++logicalSourceLoc.line;
logicalSourceLoc.column = 0;
loc[currentSource].column = 0;
}
advance();
return ret;
}
// retrieve the next character, no advance
int peek()
{
if (currentSource >= numSources) {
endOfFileReached = true;
return EndOfInput;
}
// Make sure we do not read off the end of a string.
// N.B. Sources can have a length of 0.
int sourceToRead = currentSource;
size_t charToRead = currentChar;
        while (charToRead >= lengths[sourceToRead]) {
charToRead = 0;
sourceToRead += 1;
if (sourceToRead >= numSources) {
return EndOfInput;
}
}
// Here, we care about making negative valued characters positive
return sources[sourceToRead][charToRead];
}
// go back one character
void unget()
{
// Do not roll back once we've reached the end of the file.
if (endOfFileReached)
return;
if (currentChar > 0) {
--currentChar;
--loc[currentSource].column;
--logicalSourceLoc.column;
if (loc[currentSource].column < 0) {
// We've moved back past a new line. Find the
// previous newline (or start of the file) to compute
// the column count on the now current line.
size_t chIndex = currentChar;
while (chIndex > 0) {
if (sources[currentSource][chIndex] == '\n') {
break;
}
--chIndex;
}
logicalSourceLoc.column = (int)(currentChar - chIndex);
loc[currentSource].column = (int)(currentChar - chIndex);
}
} else {
do {
--currentSource;
} while (currentSource > 0 && lengths[currentSource] == 0);
if (lengths[currentSource] == 0) {
// set to 0 if we've backed up to the start of an empty string
currentChar = 0;
} else
currentChar = lengths[currentSource] - 1;
}
if (peek() == '\n') {
--loc[currentSource].line;
--logicalSourceLoc.line;
}
}
// for #line override
void setLine(int newLine)
{
logicalSourceLoc.line = newLine;
loc[getLastValidSourceIndex()].line = newLine;
}
// for #line override in filename based parsing
void setFile(const char* filename)
{
TString* fn_tstr = NewPoolTString(filename);
logicalSourceLoc.name = fn_tstr;
loc[getLastValidSourceIndex()].name = fn_tstr;
}
void setFile(const char* filename, int i)
{
TString* fn_tstr = NewPoolTString(filename);
if (i == getLastValidSourceIndex()) {
logicalSourceLoc.name = fn_tstr;
}
loc[i].name = fn_tstr;
}
void setString(int newString)
{
logicalSourceLoc.string = newString;
loc[getLastValidSourceIndex()].string = newString;
logicalSourceLoc.name = nullptr;
loc[getLastValidSourceIndex()].name = nullptr;
}
// for #include content indentation
void setColumn(int col)
{
logicalSourceLoc.column = col;
loc[getLastValidSourceIndex()].column = col;
}
void setEndOfInput()
{
endOfFileReached = true;
currentSource = numSources;
}
bool atEndOfInput() const { return endOfFileReached; }
const TSourceLoc& getSourceLoc() const
{
if (singleLogical) {
return logicalSourceLoc;
} else {
return loc[std::max(0, std::min(currentSource, numSources - finale - 1))];
}
}
// Returns the index (starting from 0) of the most recent valid source string we are reading from.
int getLastValidSourceIndex() const { return std::min(currentSource, numSources - 1); }
void consumeWhiteSpace(bool& foundNonSpaceTab);
bool consumeComment();
void consumeWhitespaceComment(bool& foundNonSpaceTab);
bool scanVersion(int& version, EProfile& profile, bool& notFirstToken);
protected:
// advance one character
void advance()
{
++currentChar;
if (currentChar >= lengths[currentSource]) {
++currentSource;
if (currentSource < numSources) {
loc[currentSource].string = loc[currentSource - 1].string + 1;
loc[currentSource].line = 1;
loc[currentSource].column = 0;
}
while (currentSource < numSources && lengths[currentSource] == 0) {
++currentSource;
if (currentSource < numSources) {
loc[currentSource].string = loc[currentSource - 1].string + 1;
loc[currentSource].line = 1;
loc[currentSource].column = 0;
}
}
currentChar = 0;
}
}
int numSources; // number of strings in source
const unsigned char* const *sources; // array of strings; must be converted to positive values on use, to avoid aliasing with -1 as EndOfInput
const size_t *lengths; // length of each string
int currentSource;
size_t currentChar;
// This is for reporting what string/line an error occurred on, and can be overridden by #line.
// It remembers the last state of each source string as it is left for the next one, so unget()
// can restore that state.
TSourceLoc* loc; // an array
int stringBias; // the first string that is the user's string number 0
int finale; // number of internal strings after user's last string
TSourceLoc logicalSourceLoc;
bool singleLogical; // treats the strings as a single logical string.
// locations will be reported from the first string.
    // Set to true once peek() returns EndOfInput, so that we won't roll back
    // once we've reached EndOfInput.
bool endOfFileReached;
};
} // end namespace glslang
#endif // _GLSLANG_SCAN_INCLUDED_
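// Illustrative sketch, not part of the original header: reading seamlessly across an
// array of strings, none of which needs a null terminator, exactly as the class
// comment above describes. The fragments and the function name are hypothetical;
// linking against glslang is assumed.
#include <cstddef>
#include <cstdio>
inline void scanAcrossStrings()
{
    const char* strings[]    = { "void main", "() {}" };  // two pieces of one logical source
    std::size_t lengths[]    = { 9, 5 };                  // explicit lengths, no terminators needed
    glslang::TInputScanner input(2, strings, lengths);
    for (int c = input.get(); c != glslang::EndOfInput; c = input.get())
        std::putchar(c);                                  // prints "void main() {}"
}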

View File

@ -0,0 +1,93 @@
//
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// This holds context specific to the GLSL scanner, which
// sits between the preprocessor scanner and parser.
//
#pragma once
#include "ParseHelper.h"
namespace glslang {
class TPpContext;
class TPpToken;
class TParserToken;
class TScanContext {
public:
explicit TScanContext(TParseContextBase& pc) :
parseContext(pc),
afterType(false), afterStruct(false),
field(false), afterBuffer(false) { }
virtual ~TScanContext() { }
static void fillInKeywordMap();
static void deleteKeywordMap();
int tokenize(TPpContext*, TParserToken&);
protected:
TScanContext(TScanContext&);
TScanContext& operator=(TScanContext&);
int tokenizeIdentifier();
int identifierOrType();
int reservedWord();
int identifierOrReserved(bool reserved);
int es30ReservedFromGLSL(int version);
int nonreservedKeyword(int esVersion, int nonEsVersion);
int precisionKeyword();
int matNxM();
int dMat();
int firstGenerationImage(bool inEs310);
int secondGenerationImage();
TParseContextBase& parseContext;
bool afterType; // true if we've recognized a type, so can only be looking for an identifier
bool afterStruct; // true if we've recognized the STRUCT keyword, so can only be looking for an identifier
bool field; // true if we're on a field, right after a '.'
bool afterBuffer; // true if we've recognized the BUFFER keyword
TSourceLoc loc;
TParserToken* parserToken;
TPpToken* ppToken;
const char* tokenText;
int keyword;
};
} // end namespace glslang

View File

@ -0,0 +1,955 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _SYMBOL_TABLE_INCLUDED_
#define _SYMBOL_TABLE_INCLUDED_
//
// Symbol table for parsing. Has these design characteristics:
//
// * Same symbol table can be used to compile many shaders, to preserve
// effort of creating and loading with the large numbers of built-in
// symbols.
//
// --> This requires a copy mechanism, so initial pools used to create
// the shared information can be popped. Done through "clone"
// methods.
//
// * Name mangling will be used to give each function a unique name
// so that symbol table lookups are never ambiguous. This allows
// a simpler symbol table structure.
//
// * Pushing and popping of scope, so symbol table will really be a stack
// of symbol tables. Searched from the top, with new inserts going into
// the top.
//
// * Constants: Compile time constant symbols will keep their values
// in the symbol table. The parser can substitute constants at parse
// time, including doing constant folding and constant propagation.
//
// * No temporaries: Temporaries made from operations (+, --, .xy, etc.)
// are tracked in the intermediate representation, not the symbol table.
//
#include "../Include/Common.h"
#include "../Include/intermediate.h"
#include "../Include/InfoSink.h"
namespace glslang {
//
// Symbol base class. (Can build functions or variables out of these...)
//
class TVariable;
class TFunction;
class TAnonMember;
typedef TVector<const char*> TExtensionList;
class TSymbol {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
explicit TSymbol(const TString *n) : name(n), uniqueId(0), extensions(0), writable(true) { }
virtual TSymbol* clone() const = 0;
virtual ~TSymbol() { } // rely on all symbol owned memory coming from the pool
virtual const TString& getName() const { return *name; }
virtual void changeName(const TString* newName) { name = newName; }
virtual void addPrefix(const char* prefix)
{
TString newName(prefix);
newName.append(*name);
changeName(NewPoolTString(newName.c_str()));
}
virtual const TString& getMangledName() const { return getName(); }
virtual TFunction* getAsFunction() { return 0; }
virtual const TFunction* getAsFunction() const { return 0; }
virtual TVariable* getAsVariable() { return 0; }
virtual const TVariable* getAsVariable() const { return 0; }
virtual const TAnonMember* getAsAnonMember() const { return 0; }
virtual const TType& getType() const = 0;
virtual TType& getWritableType() = 0;
virtual void setUniqueId(long long id) { uniqueId = id; }
virtual long long getUniqueId() const { return uniqueId; }
virtual void setExtensions(int numExts, const char* const exts[])
{
assert(extensions == 0);
assert(numExts > 0);
extensions = NewPoolObject(extensions);
for (int e = 0; e < numExts; ++e)
extensions->push_back(exts[e]);
}
virtual int getNumExtensions() const { return extensions == nullptr ? 0 : (int)extensions->size(); }
virtual const char** getExtensions() const { return extensions->data(); }
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
virtual void dump(TInfoSink& infoSink, bool complete = false) const = 0;
void dumpExtensions(TInfoSink& infoSink) const;
#endif
virtual bool isReadOnly() const { return ! writable; }
virtual void makeReadOnly() { writable = false; }
protected:
explicit TSymbol(const TSymbol&);
TSymbol& operator=(const TSymbol&);
const TString *name;
    long long uniqueId;      // For cross-scope comparing during code generation
// For tracking what extensions must be present
// (don't use if correct version/profile is present).
TExtensionList* extensions; // an array of pointers to existing constant char strings
//
// N.B.: Non-const functions that will be generally used should assert on this,
// to avoid overwriting shared symbol-table information.
//
bool writable;
};
//
// Variable class, meaning a symbol that's not a function.
//
// There could be a separate class hierarchy for Constant variables;
// Only one of int, bool, or float, (or none) is correct for
// any particular use, but it's easy to do this way, and doesn't
// seem worth having separate classes, and "getConst" can't simply return
// different values for different types polymorphically, so this is
// just simple and pragmatic.
//
class TVariable : public TSymbol {
public:
TVariable(const TString *name, const TType& t, bool uT = false )
: TSymbol(name),
userType(uT),
constSubtree(nullptr),
memberExtensions(nullptr),
anonId(-1)
{ type.shallowCopy(t); }
virtual TVariable* clone() const;
virtual ~TVariable() { }
virtual TVariable* getAsVariable() { return this; }
virtual const TVariable* getAsVariable() const { return this; }
virtual const TType& getType() const { return type; }
virtual TType& getWritableType() { assert(writable); return type; }
virtual bool isUserType() const { return userType; }
virtual const TConstUnionArray& getConstArray() const { return constArray; }
virtual TConstUnionArray& getWritableConstArray() { assert(writable); return constArray; }
virtual void setConstArray(const TConstUnionArray& array) { constArray = array; }
virtual void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
virtual TIntermTyped* getConstSubtree() const { return constSubtree; }
virtual void setAnonId(int i) { anonId = i; }
virtual int getAnonId() const { return anonId; }
virtual void setMemberExtensions(int member, int numExts, const char* const exts[])
{
assert(type.isStruct());
assert(numExts > 0);
if (memberExtensions == nullptr) {
memberExtensions = NewPoolObject(memberExtensions);
memberExtensions->resize(type.getStruct()->size());
}
for (int e = 0; e < numExts; ++e)
(*memberExtensions)[member].push_back(exts[e]);
}
virtual bool hasMemberExtensions() const { return memberExtensions != nullptr; }
virtual int getNumMemberExtensions(int member) const
{
return memberExtensions == nullptr ? 0 : (int)(*memberExtensions)[member].size();
}
virtual const char** getMemberExtensions(int member) const { return (*memberExtensions)[member].data(); }
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
virtual void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
protected:
explicit TVariable(const TVariable&);
TVariable& operator=(const TVariable&);
TType type;
bool userType;
// we are assuming that Pool Allocator will free the memory allocated to unionArray
// when this object is destroyed
TConstUnionArray constArray; // for compile-time constant value
TIntermTyped* constSubtree; // for specialization constant computation
TVector<TExtensionList>* memberExtensions; // per-member extension list, allocated only when needed
int anonId; // the ID used for anonymous blocks: TODO: see if uniqueId could serve a dual purpose
};
//
// The function sub-class of symbols and the parser will need to
// share this definition of a function parameter.
//
struct TParameter {
TString *name;
TType* type;
TIntermTyped* defaultValue;
void copyParam(const TParameter& param)
{
if (param.name)
name = NewPoolTString(param.name->c_str());
else
name = 0;
type = param.type->clone();
defaultValue = param.defaultValue;
}
TBuiltInVariable getDeclaredBuiltIn() const { return type->getQualifier().declaredBuiltIn; }
};
//
// The function sub-class of a symbol.
//
class TFunction : public TSymbol {
public:
explicit TFunction(TOperator o) :
TSymbol(0),
op(o),
defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0) { }
TFunction(const TString *name, const TType& retType, TOperator tOp = EOpNull) :
TSymbol(name),
mangledName(*name + '('),
op(tOp),
defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0)
{
returnType.shallowCopy(retType);
declaredBuiltIn = retType.getQualifier().builtIn;
}
virtual TFunction* clone() const override;
virtual ~TFunction();
virtual TFunction* getAsFunction() override { return this; }
virtual const TFunction* getAsFunction() const override { return this; }
// Install 'p' as the (non-'this') last parameter.
// Non-'this' parameters are reflected in both the list of parameters and the
// mangled name.
virtual void addParameter(TParameter& p)
{
assert(writable);
parameters.push_back(p);
p.type->appendMangledName(mangledName);
if (p.defaultValue != nullptr)
defaultParamCount++;
}
// Install 'this' as the first parameter.
// 'this' is reflected in the list of parameters, but not the mangled name.
virtual void addThisParameter(TType& type, const char* name)
{
TParameter p = { NewPoolTString(name), new TType, nullptr };
p.type->shallowCopy(type);
parameters.insert(parameters.begin(), p);
}
virtual void addPrefix(const char* prefix) override
{
TSymbol::addPrefix(prefix);
mangledName.insert(0, prefix);
}
virtual void removePrefix(const TString& prefix)
{
assert(mangledName.compare(0, prefix.size(), prefix) == 0);
mangledName.erase(0, prefix.size());
}
virtual const TString& getMangledName() const override { return mangledName; }
virtual const TType& getType() const override { return returnType; }
virtual TBuiltInVariable getDeclaredBuiltInType() const { return declaredBuiltIn; }
virtual TType& getWritableType() override { return returnType; }
virtual void relateToOperator(TOperator o) { assert(writable); op = o; }
virtual TOperator getBuiltInOp() const { return op; }
virtual void setDefined() { assert(writable); defined = true; }
virtual bool isDefined() const { return defined; }
virtual void setPrototyped() { assert(writable); prototyped = true; }
virtual bool isPrototyped() const { return prototyped; }
virtual void setImplicitThis() { assert(writable); implicitThis = true; }
virtual bool hasImplicitThis() const { return implicitThis; }
virtual void setIllegalImplicitThis() { assert(writable); illegalImplicitThis = true; }
virtual bool hasIllegalImplicitThis() const { return illegalImplicitThis; }
// Return total number of parameters
virtual int getParamCount() const { return static_cast<int>(parameters.size()); }
// Return number of parameters with default values.
virtual int getDefaultParamCount() const { return defaultParamCount; }
// Return number of fixed parameters (without default values)
virtual int getFixedParamCount() const { return getParamCount() - getDefaultParamCount(); }
virtual TParameter& operator[](int i) { assert(writable); return parameters[i]; }
virtual const TParameter& operator[](int i) const { return parameters[i]; }
#ifndef GLSLANG_WEB
virtual void setSpirvInstruction(const TSpirvInstruction& inst)
{
relateToOperator(EOpSpirvInst);
spirvInst = inst;
}
virtual const TSpirvInstruction& getSpirvInstruction() const { return spirvInst; }
#endif
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
#endif
protected:
explicit TFunction(const TFunction&);
TFunction& operator=(const TFunction&);
typedef TVector<TParameter> TParamList;
TParamList parameters;
TType returnType;
TBuiltInVariable declaredBuiltIn;
TString mangledName;
TOperator op;
bool defined;
bool prototyped;
bool implicitThis; // True if this function is allowed to see all members of 'this'
bool illegalImplicitThis; // True if this function is not supposed to have access to dynamic members of 'this',
// even if it finds member variables in the symbol table.
// This is important for a static member function that has member variables in scope,
// but is not allowed to use them, or see hidden symbols instead.
int defaultParamCount;
#ifndef GLSLANG_WEB
TSpirvInstruction spirvInst; // SPIR-V instruction qualifiers
#endif
};
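// Illustrative sketch: how a mangled name grows as parameters are added,
// mirroring the TFunction constructor (name + '(') and addParameter() above.
// The two-character type codes used here ("f1", "vf3") are assumptions for
// the example only; in glslang the real codes come from
// TType::appendMangledName(). Compiled out with #if 0; self-contained if
// extracted to its own .cpp file.
#if 0
#include <cassert>
#include <string>

struct ToyParam { std::string typeCode; };

struct ToyFunction {
    std::string mangledName;
    explicit ToyFunction(const std::string& name) : mangledName(name + "(") { }
    void addParameter(const ToyParam& p) { mangledName += p.typeCode + ";"; }
};

int main()
{
    ToyFunction f("texture");
    f.addParameter({ "vf3" });   // hypothetical code for a vec3 parameter
    f.addParameter({ "f1"  });   // hypothetical code for a float parameter
    // All overloads of "texture" share the "texture(" prefix, which is what
    // the symbol-table range scans (lower_bound/upper_bound) rely on.
    assert(f.mangledName == "texture(vf3;f1;");
    return 0;
}
#endif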
//
// Members of anonymous blocks are a kind of TSymbol. They are not hidden in
// the symbol table behind a container; rather they are visible and point to
// their anonymous container. (The anonymous container is found through the
// member, not the other way around.)
//
class TAnonMember : public TSymbol {
public:
TAnonMember(const TString* n, unsigned int m, TVariable& a, int an) : TSymbol(n), anonContainer(a), memberNumber(m), anonId(an) { }
virtual TAnonMember* clone() const override;
virtual ~TAnonMember() { }
virtual const TAnonMember* getAsAnonMember() const override { return this; }
virtual const TVariable& getAnonContainer() const { return anonContainer; }
virtual unsigned int getMemberNumber() const { return memberNumber; }
virtual const TType& getType() const override
{
const TTypeList& types = *anonContainer.getType().getStruct();
return *types[memberNumber].type;
}
virtual TType& getWritableType() override
{
assert(writable);
const TTypeList& types = *anonContainer.getType().getStruct();
return *types[memberNumber].type;
}
virtual void setExtensions(int numExts, const char* const exts[]) override
{
anonContainer.setMemberExtensions(memberNumber, numExts, exts);
}
virtual int getNumExtensions() const override { return anonContainer.getNumMemberExtensions(memberNumber); }
virtual const char** getExtensions() const override { return anonContainer.getMemberExtensions(memberNumber); }
virtual int getAnonId() const { return anonId; }
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
#endif
protected:
explicit TAnonMember(const TAnonMember&);
TAnonMember& operator=(const TAnonMember&);
TVariable& anonContainer;
unsigned int memberNumber;
int anonId;
};
class TSymbolTableLevel {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TSymbolTableLevel() : defaultPrecision(0), anonId(0), thisLevel(false) { }
~TSymbolTableLevel();
bool insert(const TString& name, TSymbol* symbol) {
return level.insert(tLevelPair(name, symbol)).second;
}
bool insert(TSymbol& symbol, bool separateNameSpaces, const TString& forcedKeyName = TString())
{
//
// returning true means symbol was added to the table with no semantic errors
//
const TString& name = symbol.getName();
if (forcedKeyName.length()) {
return level.insert(tLevelPair(forcedKeyName, &symbol)).second;
}
else if (name == "") {
symbol.getAsVariable()->setAnonId(anonId++);
// An empty name means an anonymous container, exposing its members to the external scope.
// Give it a name and insert its members in the symbol table, pointing to the container.
char buf[20];
snprintf(buf, 20, "%s%d", AnonymousPrefix, symbol.getAsVariable()->getAnonId());
symbol.changeName(NewPoolTString(buf));
return insertAnonymousMembers(symbol, 0);
} else {
// Check for redefinition errors:
// - STL itself will tell us if there is a direct name collision, with name mangling, at this level
// - additionally, check for function-redefining-variable name collisions
const TString& insertName = symbol.getMangledName();
if (symbol.getAsFunction()) {
// make sure there isn't a variable of this name
if (! separateNameSpaces && level.find(name) != level.end())
return false;
// insert, and whatever happens is okay
level.insert(tLevelPair(insertName, &symbol));
return true;
} else
return level.insert(tLevelPair(insertName, &symbol)).second;
}
}
// Add more members to an already inserted aggregate object
bool amend(TSymbol& symbol, int firstNewMember)
{
// See insert() for comments on basic explanation of insert.
// This operates similarly, but more simply.
// Only supporting amend of anonymous blocks so far.
if (IsAnonymous(symbol.getName()))
return insertAnonymousMembers(symbol, firstNewMember);
else
return false;
}
bool insertAnonymousMembers(TSymbol& symbol, int firstMember)
{
const TTypeList& types = *symbol.getAsVariable()->getType().getStruct();
for (unsigned int m = firstMember; m < types.size(); ++m) {
TAnonMember* member = new TAnonMember(&types[m].type->getFieldName(), m, *symbol.getAsVariable(), symbol.getAsVariable()->getAnonId());
if (! level.insert(tLevelPair(member->getMangledName(), member)).second)
return false;
}
return true;
}
void retargetSymbol(const TString& from, const TString& to) {
tLevel::const_iterator fromIt = level.find(from);
tLevel::const_iterator toIt = level.find(to);
if (fromIt == level.end() || toIt == level.end())
return;
delete fromIt->second;
level[from] = toIt->second;
retargetedSymbols.push_back({from, to});
}
TSymbol* find(const TString& name) const
{
tLevel::const_iterator it = level.find(name);
if (it == level.end())
return 0;
else
return (*it).second;
}
void findFunctionNameList(const TString& name, TVector<const TFunction*>& list)
{
size_t parenAt = name.find_first_of('(');
TString base(name, 0, parenAt + 1);
tLevel::const_iterator begin = level.lower_bound(base);
base[parenAt] = ')'; // assume ')' is lexically after '('
tLevel::const_iterator end = level.upper_bound(base);
for (tLevel::const_iterator it = begin; it != end; ++it)
list.push_back(it->second->getAsFunction());
}
// See if there is already a function in the table having the given non-function-style name.
bool hasFunctionName(const TString& name) const
{
tLevel::const_iterator candidate = level.lower_bound(name);
if (candidate != level.end()) {
const TString& candidateName = (*candidate).first;
TString::size_type parenAt = candidateName.find_first_of('(');
if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0)
return true;
}
return false;
}
// See if there is a variable at this level having the given non-function-style name.
// Return true if name is found, and set variable to true if the name was a variable.
bool findFunctionVariableName(const TString& name, bool& variable) const
{
tLevel::const_iterator candidate = level.lower_bound(name);
if (candidate != level.end()) {
const TString& candidateName = (*candidate).first;
TString::size_type parenAt = candidateName.find_first_of('(');
if (parenAt == candidateName.npos) {
// not a mangled name
if (candidateName == name) {
// found a variable name match
variable = true;
return true;
}
} else {
// a mangled name
if (candidateName.compare(0, parenAt, name) == 0) {
// found a function name match
variable = false;
return true;
}
}
}
return false;
}
// Use this to do a lazy 'push' of precision defaults the first time
// a precision statement is seen in a new scope. Leave it at 0 for
// when no push was needed. Thus, it is not the current defaults,
// it is what to restore the defaults to when popping a level.
void setPreviousDefaultPrecisions(const TPrecisionQualifier *p)
{
// can call multiple times at one scope, will only latch on first call,
// as we're tracking the previous scope's values, not the current values
if (defaultPrecision != 0)
return;
defaultPrecision = new TPrecisionQualifier[EbtNumTypes];
for (int t = 0; t < EbtNumTypes; ++t)
defaultPrecision[t] = p[t];
}
void getPreviousDefaultPrecisions(TPrecisionQualifier *p)
{
// can be called for table level pops that didn't set the
// defaults
if (defaultPrecision == 0 || p == 0)
return;
for (int t = 0; t < EbtNumTypes; ++t)
p[t] = defaultPrecision[t];
}
void relateToOperator(const char* name, TOperator op);
void setFunctionExtensions(const char* name, int num, const char* const extensions[]);
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
TSymbolTableLevel* clone() const;
void readOnly();
void setThisLevel() { thisLevel = true; }
bool isThisLevel() const { return thisLevel; }
protected:
explicit TSymbolTableLevel(TSymbolTableLevel&);
TSymbolTableLevel& operator=(TSymbolTableLevel&);
typedef std::map<TString, TSymbol*, std::less<TString>, pool_allocator<std::pair<const TString, TSymbol*> > > tLevel;
typedef const tLevel::value_type tLevelPair;
typedef std::pair<tLevel::iterator, bool> tInsertResult;
tLevel level; // named mappings
TPrecisionQualifier *defaultPrecision;
// pair<FromName, ToName>
TVector<std::pair<TString, TString>> retargetedSymbols;
int anonId;
bool thisLevel; // True if this level of the symbol table is a structure scope containing member function
// that are supposed to see anonymous access to member variables.
};
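// Illustrative sketch of the ordered-map range trick used by
// findFunctionNameList() and hasFunctionName() above: because every overload
// key starts with "name(" and ')' sorts just after '(', the half-open range
// [lower_bound("name("), upper_bound("name)")) spans exactly the overloads of
// "name". Uses std::string/std::map instead of the pool-allocated
// TString/tLevel. Compiled out with #if 0; self-contained if extracted to its
// own .cpp file.
#if 0
#include <cstdio>
#include <map>
#include <string>

int main()
{
    std::map<std::string, int> level;          // stands in for tLevel
    level["max(f1;f1;"] = 1;                   // hypothetical mangled overload keys
    level["max(i1;i1;"] = 2;
    level["min(f1;f1;"] = 3;                   // a different function, must not match

    std::string base = "max(";
    auto begin = level.lower_bound(base);
    base[base.size() - 1] = ')';               // assume ')' is lexically after '('
    auto end = level.upper_bound(base);

    for (auto it = begin; it != end; ++it)     // prints only the two "max" overloads
        std::printf("%s -> %d\n", it->first.c_str(), it->second);
    return 0;
}
#endif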
class TSymbolTable {
public:
TSymbolTable() : uniqueId(0), noBuiltInRedeclarations(false), separateNameSpaces(false), adoptedLevels(0)
{
//
// This symbol table cannot be used until push() is called.
//
}
~TSymbolTable()
{
// this can be called explicitly; safest to code it so it can be called multiple times
// don't deallocate levels passed in from elsewhere
while (table.size() > adoptedLevels)
pop(0);
}
void adoptLevels(TSymbolTable& symTable)
{
for (unsigned int level = 0; level < symTable.table.size(); ++level) {
table.push_back(symTable.table[level]);
++adoptedLevels;
}
uniqueId = symTable.uniqueId;
noBuiltInRedeclarations = symTable.noBuiltInRedeclarations;
separateNameSpaces = symTable.separateNameSpaces;
}
//
// While level adopting is generic, the methods below enact the following
// convention for levels:
// 0: common built-ins shared across all stages, all compiles, only one copy for all symbol tables
// 1: per-stage built-ins, shared across all compiles, but a different copy per stage
// 2: built-ins specific to a compile, like resources that are context-dependent, or redeclared built-ins
// 3: user-shader globals
//
protected:
static const uint32_t LevelFlagBitOffset = 56;
static const int globalLevel = 3;
static bool isSharedLevel(int level) { return level <= 1; } // exclude all per-compile levels
static bool isBuiltInLevel(int level) { return level <= 2; } // exclude user globals
static bool isGlobalLevel(int level) { return level <= globalLevel; } // include user globals
public:
bool isEmpty() { return table.size() == 0; }
bool atBuiltInLevel() { return isBuiltInLevel(currentLevel()); }
bool atGlobalLevel() { return isGlobalLevel(currentLevel()); }
static bool isBuiltInSymbol(long long uniqueId) {
int level = static_cast<int>(uniqueId >> LevelFlagBitOffset);
return isBuiltInLevel(level);
}
static constexpr uint64_t uniqueIdMask = (1LL << LevelFlagBitOffset) - 1;
static const uint32_t MaxLevelInUniqueID = 127;
void setNoBuiltInRedeclarations() { noBuiltInRedeclarations = true; }
void setSeparateNameSpaces() { separateNameSpaces = true; }
void push()
{
table.push_back(new TSymbolTableLevel);
updateUniqueIdLevelFlag();
}
// Make a new symbol-table level to represent the scope introduced by a structure
// containing member functions, such that the member functions can find anonymous
// references to member variables.
//
// 'thisSymbol' should have a name of "" to trigger anonymous structure-member
// symbol finds.
void pushThis(TSymbol& thisSymbol)
{
assert(thisSymbol.getName().size() == 0);
table.push_back(new TSymbolTableLevel);
updateUniqueIdLevelFlag();
table.back()->setThisLevel();
insert(thisSymbol);
}
void pop(TPrecisionQualifier *p)
{
table[currentLevel()]->getPreviousDefaultPrecisions(p);
delete table.back();
table.pop_back();
updateUniqueIdLevelFlag();
}
//
// Insert a visible symbol into the symbol table so it can
// be found later by name.
//
// Returns false if there was a name collision.
//
bool insert(TSymbol& symbol)
{
symbol.setUniqueId(++uniqueId);
// make sure there isn't a function of this variable name
if (! separateNameSpaces && ! symbol.getAsFunction() && table[currentLevel()]->hasFunctionName(symbol.getName()))
return false;
// check for not overloading or redefining a built-in function
if (noBuiltInRedeclarations) {
if (atGlobalLevel() && currentLevel() > 0) {
if (table[0]->hasFunctionName(symbol.getName()))
return false;
if (currentLevel() > 1 && table[1]->hasFunctionName(symbol.getName()))
return false;
}
}
return table[currentLevel()]->insert(symbol, separateNameSpaces);
}
// Add more members to an already inserted aggregate object
bool amend(TSymbol& symbol, int firstNewMember)
{
// See insert() for comments on basic explanation of insert.
// This operates similarly, but more simply.
return table[currentLevel()]->amend(symbol, firstNewMember);
}
// Update the level info in symbol's unique ID to current level
void amendSymbolIdLevel(TSymbol& symbol)
{
// clamp level to avoid overflow
uint64_t level = (uint32_t)currentLevel() > MaxLevelInUniqueID ? MaxLevelInUniqueID : currentLevel();
uint64_t symbolId = symbol.getUniqueId();
symbolId &= uniqueIdMask;
symbolId |= (level << LevelFlagBitOffset);
symbol.setUniqueId(symbolId);
}
//
// To allocate an internal temporary, which will need to be uniquely
// identified by the consumer of the AST, but never needs to be found by
// doing a symbol-table search by name, and hence is allowed an arbitrary
// name in the symbol with no worry of collision.
//
void makeInternalVariable(TSymbol& symbol)
{
symbol.setUniqueId(++uniqueId);
}
//
// Copy a variable or anonymous member's structure from a shared level so that
// it can be added (soon after return) to the symbol table where it can be
// modified without impacting other users of the shared table.
//
TSymbol* copyUpDeferredInsert(TSymbol* shared)
{
if (shared->getAsVariable()) {
TSymbol* copy = shared->clone();
copy->setUniqueId(shared->getUniqueId());
return copy;
} else {
const TAnonMember* anon = shared->getAsAnonMember();
assert(anon);
TVariable* container = anon->getAnonContainer().clone();
container->changeName(NewPoolTString(""));
container->setUniqueId(anon->getAnonContainer().getUniqueId());
return container;
}
}
TSymbol* copyUp(TSymbol* shared)
{
TSymbol* copy = copyUpDeferredInsert(shared);
table[globalLevel]->insert(*copy, separateNameSpaces);
if (shared->getAsVariable())
return copy;
else {
// return the copy of the anonymous member
return table[globalLevel]->find(shared->getName());
}
}
// Normal find of a symbol, that can optionally say whether the symbol was found
// at a built-in level or the current top-scope level.
TSymbol* find(const TString& name, bool* builtIn = 0, bool* currentScope = 0, int* thisDepthP = 0)
{
int level = currentLevel();
TSymbol* symbol;
int thisDepth = 0;
do {
if (table[level]->isThisLevel())
++thisDepth;
symbol = table[level]->find(name);
--level;
} while (symbol == nullptr && level >= 0);
level++;
if (builtIn)
*builtIn = isBuiltInLevel(level);
if (currentScope)
*currentScope = isGlobalLevel(currentLevel()) || level == currentLevel(); // consider shared levels as "current scope" WRT user globals
if (thisDepthP != nullptr) {
if (! table[level]->isThisLevel())
thisDepth = 0;
*thisDepthP = thisDepth;
}
return symbol;
}
void retargetSymbol(const TString& from, const TString& to) {
int level = currentLevel();
table[level]->retargetSymbol(from, to);
}
// Find a symbol and return how many layers of nested
// structures-with-member-functions ('this' scopes) deep the symbol was
// found in.
TSymbol* find(const TString& name, int& thisDepth)
{
int level = currentLevel();
TSymbol* symbol;
thisDepth = 0;
do {
if (table[level]->isThisLevel())
++thisDepth;
symbol = table[level]->find(name);
--level;
} while (symbol == 0 && level >= 0);
if (! table[level + 1]->isThisLevel())
thisDepth = 0;
return symbol;
}
bool isFunctionNameVariable(const TString& name) const
{
if (separateNameSpaces)
return false;
int level = currentLevel();
do {
bool variable;
bool found = table[level]->findFunctionVariableName(name, variable);
if (found)
return variable;
--level;
} while (level >= 0);
return false;
}
void findFunctionNameList(const TString& name, TVector<const TFunction*>& list, bool& builtIn)
{
// For user levels, return the set found in the first scope with a match
builtIn = false;
int level = currentLevel();
do {
table[level]->findFunctionNameList(name, list);
--level;
} while (list.empty() && level >= globalLevel);
if (! list.empty())
return;
// Gather across all built-in levels; they don't hide each other
builtIn = true;
do {
table[level]->findFunctionNameList(name, list);
--level;
} while (level >= 0);
}
void relateToOperator(const char* name, TOperator op)
{
for (unsigned int level = 0; level < table.size(); ++level)
table[level]->relateToOperator(name, op);
}
void setFunctionExtensions(const char* name, int num, const char* const extensions[])
{
for (unsigned int level = 0; level < table.size(); ++level)
table[level]->setFunctionExtensions(name, num, extensions);
}
void setVariableExtensions(const char* name, int numExts, const char* const extensions[])
{
TSymbol* symbol = find(TString(name));
if (symbol == nullptr)
return;
symbol->setExtensions(numExts, extensions);
}
void setVariableExtensions(const char* blockName, const char* name, int numExts, const char* const extensions[])
{
TSymbol* symbol = find(TString(blockName));
if (symbol == nullptr)
return;
TVariable* variable = symbol->getAsVariable();
assert(variable != nullptr);
const TTypeList& structure = *variable->getAsVariable()->getType().getStruct();
for (int member = 0; member < (int)structure.size(); ++member) {
if (structure[member].type->getFieldName().compare(name) == 0) {
variable->setMemberExtensions(member, numExts, extensions);
return;
}
}
}
long long getMaxSymbolId() { return uniqueId; }
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
void copyTable(const TSymbolTable& copyOf);
void setPreviousDefaultPrecisions(TPrecisionQualifier *p) { table[currentLevel()]->setPreviousDefaultPrecisions(p); }
void readOnly()
{
for (unsigned int level = 0; level < table.size(); ++level)
table[level]->readOnly();
}
// Encode the current level in the high bits of the unique id
void updateUniqueIdLevelFlag() {
// clamp level to avoid overflow
uint64_t level = (uint32_t)currentLevel() > MaxLevelInUniqueID ? MaxLevelInUniqueID : currentLevel();
uniqueId &= uniqueIdMask;
uniqueId |= (level << LevelFlagBitOffset);
}
void overwriteUniqueId(long long id)
{
uniqueId = id;
updateUniqueIdLevelFlag();
}
protected:
TSymbolTable(TSymbolTable&);
TSymbolTable& operator=(TSymbolTable&);
int currentLevel() const { return static_cast<int>(table.size()) - 1; }
std::vector<TSymbolTableLevel*> table;
long long uniqueId; // for unique identification in code generation
bool noBuiltInRedeclarations;
bool separateNameSpaces;
unsigned int adoptedLevels;
};
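// Illustrative sketch of the unique-id encoding used above: the low 56 bits
// (uniqueIdMask) hold the running counter, and the bits from
// LevelFlagBitOffset upward hold the (clamped) symbol-table level, so
// isBuiltInSymbol() can later recover the level with a single shift. Plain
// integers stand in for TSymbol here. Compiled out with #if 0; self-contained
// if extracted to its own .cpp file.
#if 0
#include <cassert>
#include <cstdint>

static const uint32_t LevelFlagBitOffset = 56;
static const uint64_t uniqueIdMask       = (1ULL << LevelFlagBitOffset) - 1;
static const uint32_t MaxLevelInUniqueID = 127;

static uint64_t packLevel(uint64_t id, uint32_t level)
{
    if (level > MaxLevelInUniqueID)     // clamp level to avoid overflow
        level = MaxLevelInUniqueID;
    id &= uniqueIdMask;                 // keep only the counter bits
    id |= (uint64_t(level) << LevelFlagBitOffset);
    return id;
}

int main()
{
    uint64_t id = packLevel(42, 1);                 // counter 42, level 1 (per-stage built-ins)
    assert((id & uniqueIdMask) == 42);              // counter survives the tagging
    assert(int(id >> LevelFlagBitOffset) == 1);     // level recovered, as in isBuiltInSymbol()
    assert(int(id >> LevelFlagBitOffset) <= 2);     // i.e. it is a built-in level
    return 0;
}
#endif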
} // end namespace glslang
#endif // _SYMBOL_TABLE_INCLUDED_


@ -0,0 +1,348 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Copyright (C) 2015-2018 Google, Inc.
// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _VERSIONS_INCLUDED_
#define _VERSIONS_INCLUDED_
#define LAST_ELEMENT_MARKER(x) x
//
// Help manage multiple profiles, versions, extensions etc.
//
//
// Profiles are set up for masking operations, so queries can be done on multiple
// profiles at the same time.
//
// Don't maintain an ordinal set of enums (0,1,2,3...) to avoid all possible
// defects from mixing the two different forms.
//
typedef enum : unsigned {
EBadProfile = 0,
ENoProfile = (1 << 0), // only for desktop, before profiles showed up
ECoreProfile = (1 << 1),
ECompatibilityProfile = (1 << 2),
EEsProfile = (1 << 3),
LAST_ELEMENT_MARKER(EProfileCount),
} EProfile;
namespace glslang {
//
// Map from profile enum to externally readable text name.
//
inline const char* ProfileName(EProfile profile)
{
switch (profile) {
case ENoProfile: return "none";
case ECoreProfile: return "core";
case ECompatibilityProfile: return "compatibility";
case EEsProfile: return "es";
default: return "unknown profile";
}
}
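// Illustrative sketch of the masking the comment above refers to: because each
// profile is a distinct bit, one test can ask "is this any desktop profile?"
// by OR-ing the desktop bits together. The helper name below is an assumption
// for the example, not part of this header. Compiled out with #if 0.
#if 0
inline bool IsDesktopProfile(EProfile profile)
{
    // ENoProfile, ECoreProfile and ECompatibilityProfile are the desktop cases;
    // EEsProfile is excluded by the mask.
    return (profile & (ENoProfile | ECoreProfile | ECompatibilityProfile)) != 0;
}
#endif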
//
// What source rules, validation rules, target language, etc. are needed or
// desired for SPIR-V?
//
// 0 means a target or rule set is not enabled (ignore rules from that entity).
// Non-0 means to apply semantic rules arising from that version of its rule set.
// The union of all requested rule sets will be applied.
//
struct SpvVersion {
SpvVersion() : spv(0), vulkanGlsl(0), vulkan(0), openGl(0), vulkanRelaxed(false) {}
unsigned int spv; // the version of SPIR-V to target, as defined by "word 1" of the SPIR-V binary header
int vulkanGlsl; // the version of GLSL semantics for Vulkan, from GL_KHR_vulkan_glsl, for "#define VULKAN XXX"
int vulkan; // the version of Vulkan, for which SPIR-V execution environment rules to use
int openGl; // the version of GLSL semantics for OpenGL, from GL_ARB_gl_spirv, for "#define GL_SPIRV XXX"
bool vulkanRelaxed; // relax changes to GLSL for Vulkan, allowing some GL-specific constructs to be compiled to a Vulkan SPIR-V target
};
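// Illustrative sketch of how a client might fill in SpvVersion for a
// Vulkan-flavoured compile. The exact numbers are assumptions for the example:
// spv follows the SPIR-V "word 1" packing (major << 16 | minor << 8) described
// above, the Vulkan value uses a VK_MAKE_VERSION-style packing, and 100 is a
// placeholder for the "#define VULKAN XXX" macro value mentioned in the field
// comments. Compiled out with #if 0.
#if 0
inline SpvVersion MakeVulkanSpvVersion()
{
    SpvVersion v;                         // everything starts disabled (0 / false)
    v.spv        = (1 << 16) | (3 << 8);  // target SPIR-V 1.3 (word-1 packing)
    v.vulkanGlsl = 100;                   // GL_KHR_vulkan_glsl semantics (assumed macro value)
    v.vulkan     = (1 << 22);             // Vulkan 1.0 rules (assumed VK_MAKE_VERSION-style encoding)
    v.vulkanRelaxed = false;              // keep strict Vulkan GLSL rules
    return v;
}
#endif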
//
// The behaviors from the GLSL "#extension extension_name : behavior"
//
typedef enum {
EBhMissing = 0,
EBhRequire,
EBhEnable,
EBhWarn,
EBhDisable,
EBhDisablePartial // use as initial state of an extension that is only partially implemented
} TExtensionBehavior;
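// Illustrative sketch of mapping the behavior token from a
// "#extension <name> : <behavior>" directive onto TExtensionBehavior. This is
// only an example mapping routine, not the preprocessor's actual parser.
// Compiled out with #if 0; self-contained if extracted (plus this enum).
#if 0
#include <cstring>

inline TExtensionBehavior BehaviorFromString(const char* s)
{
    if (std::strcmp(s, "require") == 0) return EBhRequire;
    if (std::strcmp(s, "enable")  == 0) return EBhEnable;
    if (std::strcmp(s, "warn")    == 0) return EBhWarn;
    if (std::strcmp(s, "disable") == 0) return EBhDisable;
    return EBhMissing;                  // unknown token: caller reports an error
}
#endif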
//
// Symbolic names for extensions. Strings may be directly used when calling the
// functions, but better to have the compiler do spelling checks.
//
const char* const E_GL_OES_texture_3D = "GL_OES_texture_3D";
const char* const E_GL_OES_standard_derivatives = "GL_OES_standard_derivatives";
const char* const E_GL_EXT_frag_depth = "GL_EXT_frag_depth";
const char* const E_GL_OES_EGL_image_external = "GL_OES_EGL_image_external";
const char* const E_GL_OES_EGL_image_external_essl3 = "GL_OES_EGL_image_external_essl3";
const char* const E_GL_EXT_YUV_target = "GL_EXT_YUV_target";
const char* const E_GL_EXT_shader_texture_lod = "GL_EXT_shader_texture_lod";
const char* const E_GL_EXT_shadow_samplers = "GL_EXT_shadow_samplers";
const char* const E_GL_ARB_texture_rectangle = "GL_ARB_texture_rectangle";
const char* const E_GL_3DL_array_objects = "GL_3DL_array_objects";
const char* const E_GL_ARB_shading_language_420pack = "GL_ARB_shading_language_420pack";
const char* const E_GL_ARB_texture_gather = "GL_ARB_texture_gather";
const char* const E_GL_ARB_gpu_shader5 = "GL_ARB_gpu_shader5";
const char* const E_GL_ARB_separate_shader_objects = "GL_ARB_separate_shader_objects";
const char* const E_GL_ARB_compute_shader = "GL_ARB_compute_shader";
const char* const E_GL_ARB_tessellation_shader = "GL_ARB_tessellation_shader";
const char* const E_GL_ARB_enhanced_layouts = "GL_ARB_enhanced_layouts";
const char* const E_GL_ARB_texture_cube_map_array = "GL_ARB_texture_cube_map_array";
const char* const E_GL_ARB_texture_multisample = "GL_ARB_texture_multisample";
const char* const E_GL_ARB_shader_texture_lod = "GL_ARB_shader_texture_lod";
const char* const E_GL_ARB_explicit_attrib_location = "GL_ARB_explicit_attrib_location";
const char* const E_GL_ARB_explicit_uniform_location = "GL_ARB_explicit_uniform_location";
const char* const E_GL_ARB_shader_image_load_store = "GL_ARB_shader_image_load_store";
const char* const E_GL_ARB_shader_atomic_counters = "GL_ARB_shader_atomic_counters";
const char* const E_GL_ARB_shader_atomic_counter_ops = "GL_ARB_shader_atomic_counter_ops";
const char* const E_GL_ARB_shader_draw_parameters = "GL_ARB_shader_draw_parameters";
const char* const E_GL_ARB_shader_group_vote = "GL_ARB_shader_group_vote";
const char* const E_GL_ARB_derivative_control = "GL_ARB_derivative_control";
const char* const E_GL_ARB_shader_texture_image_samples = "GL_ARB_shader_texture_image_samples";
const char* const E_GL_ARB_viewport_array = "GL_ARB_viewport_array";
const char* const E_GL_ARB_gpu_shader_int64 = "GL_ARB_gpu_shader_int64";
const char* const E_GL_ARB_gpu_shader_fp64 = "GL_ARB_gpu_shader_fp64";
const char* const E_GL_ARB_shader_ballot = "GL_ARB_shader_ballot";
const char* const E_GL_ARB_sparse_texture2 = "GL_ARB_sparse_texture2";
const char* const E_GL_ARB_sparse_texture_clamp = "GL_ARB_sparse_texture_clamp";
const char* const E_GL_ARB_shader_stencil_export = "GL_ARB_shader_stencil_export";
// const char* const E_GL_ARB_cull_distance = "GL_ARB_cull_distance"; // present for 4.5, but need extension control over block members
const char* const E_GL_ARB_post_depth_coverage = "GL_ARB_post_depth_coverage";
const char* const E_GL_ARB_shader_viewport_layer_array = "GL_ARB_shader_viewport_layer_array";
const char* const E_GL_ARB_fragment_shader_interlock = "GL_ARB_fragment_shader_interlock";
const char* const E_GL_ARB_shader_clock = "GL_ARB_shader_clock";
const char* const E_GL_ARB_uniform_buffer_object = "GL_ARB_uniform_buffer_object";
const char* const E_GL_ARB_sample_shading = "GL_ARB_sample_shading";
const char* const E_GL_ARB_shader_bit_encoding = "GL_ARB_shader_bit_encoding";
const char* const E_GL_ARB_shader_image_size = "GL_ARB_shader_image_size";
const char* const E_GL_ARB_shader_storage_buffer_object = "GL_ARB_shader_storage_buffer_object";
const char* const E_GL_ARB_shading_language_packing = "GL_ARB_shading_language_packing";
const char* const E_GL_ARB_texture_query_lod = "GL_ARB_texture_query_lod";
const char* const E_GL_ARB_vertex_attrib_64bit = "GL_ARB_vertex_attrib_64bit";
const char* const E_GL_ARB_draw_instanced = "GL_ARB_draw_instanced";
const char* const E_GL_ARB_fragment_coord_conventions = "GL_ARB_fragment_coord_conventions";
const char* const E_GL_KHR_shader_subgroup_basic = "GL_KHR_shader_subgroup_basic";
const char* const E_GL_KHR_shader_subgroup_vote = "GL_KHR_shader_subgroup_vote";
const char* const E_GL_KHR_shader_subgroup_arithmetic = "GL_KHR_shader_subgroup_arithmetic";
const char* const E_GL_KHR_shader_subgroup_ballot = "GL_KHR_shader_subgroup_ballot";
const char* const E_GL_KHR_shader_subgroup_shuffle = "GL_KHR_shader_subgroup_shuffle";
const char* const E_GL_KHR_shader_subgroup_shuffle_relative = "GL_KHR_shader_subgroup_shuffle_relative";
const char* const E_GL_KHR_shader_subgroup_clustered = "GL_KHR_shader_subgroup_clustered";
const char* const E_GL_KHR_shader_subgroup_quad = "GL_KHR_shader_subgroup_quad";
const char* const E_GL_KHR_memory_scope_semantics = "GL_KHR_memory_scope_semantics";
const char* const E_GL_EXT_shader_atomic_int64 = "GL_EXT_shader_atomic_int64";
const char* const E_GL_EXT_shader_non_constant_global_initializers = "GL_EXT_shader_non_constant_global_initializers";
const char* const E_GL_EXT_shader_image_load_formatted = "GL_EXT_shader_image_load_formatted";
const char* const E_GL_EXT_shader_16bit_storage = "GL_EXT_shader_16bit_storage";
const char* const E_GL_EXT_shader_8bit_storage = "GL_EXT_shader_8bit_storage";
// EXT extensions
const char* const E_GL_EXT_device_group = "GL_EXT_device_group";
const char* const E_GL_EXT_multiview = "GL_EXT_multiview";
const char* const E_GL_EXT_post_depth_coverage = "GL_EXT_post_depth_coverage";
const char* const E_GL_EXT_control_flow_attributes = "GL_EXT_control_flow_attributes";
const char* const E_GL_EXT_nonuniform_qualifier = "GL_EXT_nonuniform_qualifier";
const char* const E_GL_EXT_samplerless_texture_functions = "GL_EXT_samplerless_texture_functions";
const char* const E_GL_EXT_scalar_block_layout = "GL_EXT_scalar_block_layout";
const char* const E_GL_EXT_fragment_invocation_density = "GL_EXT_fragment_invocation_density";
const char* const E_GL_EXT_buffer_reference = "GL_EXT_buffer_reference";
const char* const E_GL_EXT_buffer_reference2 = "GL_EXT_buffer_reference2";
const char* const E_GL_EXT_buffer_reference_uvec2 = "GL_EXT_buffer_reference_uvec2";
const char* const E_GL_EXT_demote_to_helper_invocation = "GL_EXT_demote_to_helper_invocation";
const char* const E_GL_EXT_shader_realtime_clock = "GL_EXT_shader_realtime_clock";
const char* const E_GL_EXT_debug_printf = "GL_EXT_debug_printf";
const char* const E_GL_EXT_ray_tracing = "GL_EXT_ray_tracing";
const char* const E_GL_EXT_ray_query = "GL_EXT_ray_query";
const char* const E_GL_EXT_ray_flags_primitive_culling = "GL_EXT_ray_flags_primitive_culling";
const char* const E_GL_EXT_ray_cull_mask = "GL_EXT_ray_cull_mask";
const char* const E_GL_EXT_blend_func_extended = "GL_EXT_blend_func_extended";
const char* const E_GL_EXT_shader_implicit_conversions = "GL_EXT_shader_implicit_conversions";
const char* const E_GL_EXT_fragment_shading_rate = "GL_EXT_fragment_shading_rate";
const char* const E_GL_EXT_shader_image_int64 = "GL_EXT_shader_image_int64";
const char* const E_GL_EXT_null_initializer = "GL_EXT_null_initializer";
const char* const E_GL_EXT_shared_memory_block = "GL_EXT_shared_memory_block";
const char* const E_GL_EXT_subgroup_uniform_control_flow = "GL_EXT_subgroup_uniform_control_flow";
const char* const E_GL_EXT_spirv_intrinsics = "GL_EXT_spirv_intrinsics";
// Arrays of extensions for the above post_depth_coverage duplications
const char* const post_depth_coverageEXTs[] = { E_GL_ARB_post_depth_coverage, E_GL_EXT_post_depth_coverage };
const int Num_post_depth_coverageEXTs = sizeof(post_depth_coverageEXTs) / sizeof(post_depth_coverageEXTs[0]);
// OVR extensions
const char* const E_GL_OVR_multiview = "GL_OVR_multiview";
const char* const E_GL_OVR_multiview2 = "GL_OVR_multiview2";
const char* const OVR_multiview_EXTs[] = { E_GL_OVR_multiview, E_GL_OVR_multiview2 };
const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multiview_EXTs[0]);
// #line and #include
const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive";
const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
const char* const E_GL_AMD_gcn_shader = "GL_AMD_gcn_shader";
const char* const E_GL_AMD_gpu_shader_half_float = "GL_AMD_gpu_shader_half_float";
const char* const E_GL_AMD_texture_gather_bias_lod = "GL_AMD_texture_gather_bias_lod";
const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_shader_int16";
const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod";
const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask";
const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch";
const char* const E_GL_INTEL_shader_integer_functions2 = "GL_INTEL_shader_integer_functions2";
const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage";
const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough";
const char* const E_GL_NV_viewport_array2 = "GL_NV_viewport_array2";
const char* const E_GL_NV_stereo_view_rendering = "GL_NV_stereo_view_rendering";
const char* const E_GL_NVX_multiview_per_view_attributes = "GL_NVX_multiview_per_view_attributes";
const char* const E_GL_NV_shader_atomic_int64 = "GL_NV_shader_atomic_int64";
const char* const E_GL_NV_conservative_raster_underestimation = "GL_NV_conservative_raster_underestimation";
const char* const E_GL_NV_shader_noperspective_interpolation = "GL_NV_shader_noperspective_interpolation";
const char* const E_GL_NV_shader_subgroup_partitioned = "GL_NV_shader_subgroup_partitioned";
const char* const E_GL_NV_shading_rate_image = "GL_NV_shading_rate_image";
const char* const E_GL_NV_ray_tracing = "GL_NV_ray_tracing";
const char* const E_GL_NV_ray_tracing_motion_blur = "GL_NV_ray_tracing_motion_blur";
const char* const E_GL_NV_fragment_shader_barycentric = "GL_NV_fragment_shader_barycentric";
const char* const E_GL_NV_compute_shader_derivatives = "GL_NV_compute_shader_derivatives";
const char* const E_GL_NV_shader_texture_footprint = "GL_NV_shader_texture_footprint";
const char* const E_GL_NV_mesh_shader = "GL_NV_mesh_shader";
// Arrays of extensions for the above viewportEXTs duplications
const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 };
const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]);
const char* const E_GL_NV_cooperative_matrix = "GL_NV_cooperative_matrix";
const char* const E_GL_NV_shader_sm_builtins = "GL_NV_shader_sm_builtins";
const char* const E_GL_NV_integer_cooperative_matrix = "GL_NV_integer_cooperative_matrix";
// AEP
const char* const E_GL_ANDROID_extension_pack_es31a = "GL_ANDROID_extension_pack_es31a";
const char* const E_GL_KHR_blend_equation_advanced = "GL_KHR_blend_equation_advanced";
const char* const E_GL_OES_sample_variables = "GL_OES_sample_variables";
const char* const E_GL_OES_shader_image_atomic = "GL_OES_shader_image_atomic";
const char* const E_GL_OES_shader_multisample_interpolation = "GL_OES_shader_multisample_interpolation";
const char* const E_GL_OES_texture_storage_multisample_2d_array = "GL_OES_texture_storage_multisample_2d_array";
const char* const E_GL_EXT_geometry_shader = "GL_EXT_geometry_shader";
const char* const E_GL_EXT_geometry_point_size = "GL_EXT_geometry_point_size";
const char* const E_GL_EXT_gpu_shader5 = "GL_EXT_gpu_shader5";
const char* const E_GL_EXT_primitive_bounding_box = "GL_EXT_primitive_bounding_box";
const char* const E_GL_EXT_shader_io_blocks = "GL_EXT_shader_io_blocks";
const char* const E_GL_EXT_tessellation_shader = "GL_EXT_tessellation_shader";
const char* const E_GL_EXT_tessellation_point_size = "GL_EXT_tessellation_point_size";
const char* const E_GL_EXT_texture_buffer = "GL_EXT_texture_buffer";
const char* const E_GL_EXT_texture_cube_map_array = "GL_EXT_texture_cube_map_array";
const char* const E_GL_EXT_shader_integer_mix = "GL_EXT_shader_integer_mix";
// OES matching AEP
const char* const E_GL_OES_geometry_shader = "GL_OES_geometry_shader";
const char* const E_GL_OES_geometry_point_size = "GL_OES_geometry_point_size";
const char* const E_GL_OES_gpu_shader5 = "GL_OES_gpu_shader5";
const char* const E_GL_OES_primitive_bounding_box = "GL_OES_primitive_bounding_box";
const char* const E_GL_OES_shader_io_blocks = "GL_OES_shader_io_blocks";
const char* const E_GL_OES_tessellation_shader = "GL_OES_tessellation_shader";
const char* const E_GL_OES_tessellation_point_size = "GL_OES_tessellation_point_size";
const char* const E_GL_OES_texture_buffer = "GL_OES_texture_buffer";
const char* const E_GL_OES_texture_cube_map_array = "GL_OES_texture_cube_map_array";
// EXT
const char* const E_GL_EXT_shader_explicit_arithmetic_types = "GL_EXT_shader_explicit_arithmetic_types";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int8 = "GL_EXT_shader_explicit_arithmetic_types_int8";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int16 = "GL_EXT_shader_explicit_arithmetic_types_int16";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int32 = "GL_EXT_shader_explicit_arithmetic_types_int32";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int64 = "GL_EXT_shader_explicit_arithmetic_types_int64";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float16 = "GL_EXT_shader_explicit_arithmetic_types_float16";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float32 = "GL_EXT_shader_explicit_arithmetic_types_float32";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float64 = "GL_EXT_shader_explicit_arithmetic_types_float64";
const char* const E_GL_EXT_shader_subgroup_extended_types_int8 = "GL_EXT_shader_subgroup_extended_types_int8";
const char* const E_GL_EXT_shader_subgroup_extended_types_int16 = "GL_EXT_shader_subgroup_extended_types_int16";
const char* const E_GL_EXT_shader_subgroup_extended_types_int64 = "GL_EXT_shader_subgroup_extended_types_int64";
const char* const E_GL_EXT_shader_subgroup_extended_types_float16 = "GL_EXT_shader_subgroup_extended_types_float16";
const char* const E_GL_EXT_terminate_invocation = "GL_EXT_terminate_invocation";
const char* const E_GL_EXT_shader_atomic_float = "GL_EXT_shader_atomic_float";
const char* const E_GL_EXT_shader_atomic_float2 = "GL_EXT_shader_atomic_float2";
// Arrays of extensions for the above AEP duplications
const char* const AEP_geometry_shader[] = { E_GL_EXT_geometry_shader, E_GL_OES_geometry_shader };
const int Num_AEP_geometry_shader = sizeof(AEP_geometry_shader)/sizeof(AEP_geometry_shader[0]);
const char* const AEP_geometry_point_size[] = { E_GL_EXT_geometry_point_size, E_GL_OES_geometry_point_size };
const int Num_AEP_geometry_point_size = sizeof(AEP_geometry_point_size)/sizeof(AEP_geometry_point_size[0]);
const char* const AEP_gpu_shader5[] = { E_GL_EXT_gpu_shader5, E_GL_OES_gpu_shader5 };
const int Num_AEP_gpu_shader5 = sizeof(AEP_gpu_shader5)/sizeof(AEP_gpu_shader5[0]);
const char* const AEP_primitive_bounding_box[] = { E_GL_EXT_primitive_bounding_box, E_GL_OES_primitive_bounding_box };
const int Num_AEP_primitive_bounding_box = sizeof(AEP_primitive_bounding_box)/sizeof(AEP_primitive_bounding_box[0]);
const char* const AEP_shader_io_blocks[] = { E_GL_EXT_shader_io_blocks, E_GL_OES_shader_io_blocks };
const int Num_AEP_shader_io_blocks = sizeof(AEP_shader_io_blocks)/sizeof(AEP_shader_io_blocks[0]);
const char* const AEP_tessellation_shader[] = { E_GL_EXT_tessellation_shader, E_GL_OES_tessellation_shader };
const int Num_AEP_tessellation_shader = sizeof(AEP_tessellation_shader)/sizeof(AEP_tessellation_shader[0]);
const char* const AEP_tessellation_point_size[] = { E_GL_EXT_tessellation_point_size, E_GL_OES_tessellation_point_size };
const int Num_AEP_tessellation_point_size = sizeof(AEP_tessellation_point_size)/sizeof(AEP_tessellation_point_size[0]);
const char* const AEP_texture_buffer[] = { E_GL_EXT_texture_buffer, E_GL_OES_texture_buffer };
const int Num_AEP_texture_buffer = sizeof(AEP_texture_buffer)/sizeof(AEP_texture_buffer[0]);
const char* const AEP_texture_cube_map_array[] = { E_GL_EXT_texture_cube_map_array, E_GL_OES_texture_cube_map_array };
const int Num_AEP_texture_cube_map_array = sizeof(AEP_texture_cube_map_array)/sizeof(AEP_texture_cube_map_array[0]);
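// Illustrative sketch of what these parallel Num_* / *EXTs[] pairs are for:
// a built-in can be tagged, in one call, with every extension that may enable
// it. "someBuiltIn" and the symbolTable object are placeholders for the
// example; setFunctionExtensions() is declared in the symbol-table header
// earlier in this commit. Compiled out with #if 0.
#if 0
void TagAepBuiltIn(TSymbolTable& symbolTable)
{
    // Either GL_EXT_gpu_shader5 or GL_OES_gpu_shader5 is accepted as enabling
    // the (hypothetical) built-in below.
    symbolTable.setFunctionExtensions("someBuiltIn", Num_AEP_gpu_shader5, AEP_gpu_shader5);
}
#endif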
} // end namespace glslang
#endif // _VERSIONS_INCLUDED_


@ -0,0 +1,150 @@
//
// Copyright (C) 2017 LunarG, Inc.
// Copyright (C) 2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _ATTRIBUTE_INCLUDED_
#define _ATTRIBUTE_INCLUDED_
#include "../Include/Common.h"
#include "../Include/ConstantUnion.h"
namespace glslang {
enum TAttributeType {
EatNone,
EatAllow_uav_condition,
EatBranch,
EatCall,
EatDomain,
EatEarlyDepthStencil,
EatFastOpt,
EatFlatten,
EatForceCase,
EatInstance,
EatMaxTessFactor,
EatNumThreads,
EatMaxVertexCount,
EatOutputControlPoints,
EatOutputTopology,
EatPartitioning,
EatPatchConstantFunc,
EatPatchSize,
EatUnroll,
EatLoop,
EatBinding,
EatGlobalBinding,
EatLocation,
EatInputAttachment,
EatBuiltIn,
EatPushConstant,
EatConstantId,
EatDependencyInfinite,
EatDependencyLength,
EatMinIterations,
EatMaxIterations,
EatIterationMultiple,
EatPeelCount,
EatPartialCount,
EatFormatRgba32f,
EatFormatRgba16f,
EatFormatR32f,
EatFormatRgba8,
EatFormatRgba8Snorm,
EatFormatRg32f,
EatFormatRg16f,
EatFormatR11fG11fB10f,
EatFormatR16f,
EatFormatRgba16,
EatFormatRgb10A2,
EatFormatRg16,
EatFormatRg8,
EatFormatR16,
EatFormatR8,
EatFormatRgba16Snorm,
EatFormatRg16Snorm,
EatFormatRg8Snorm,
EatFormatR16Snorm,
EatFormatR8Snorm,
EatFormatRgba32i,
EatFormatRgba16i,
EatFormatRgba8i,
EatFormatR32i,
EatFormatRg32i,
EatFormatRg16i,
EatFormatRg8i,
EatFormatR16i,
EatFormatR8i,
EatFormatRgba32ui,
EatFormatRgba16ui,
EatFormatRgba8ui,
EatFormatR32ui,
EatFormatRgb10a2ui,
EatFormatRg32ui,
EatFormatRg16ui,
EatFormatRg8ui,
EatFormatR16ui,
EatFormatR8ui,
EatFormatUnknown,
EatNonWritable,
EatNonReadable,
EatSubgroupUniformControlFlow,
};
class TIntermAggregate;
struct TAttributeArgs {
TAttributeType name;
const TIntermAggregate* args;
// Obtain attribute as integer
// Return false if it cannot be obtained
bool getInt(int& value, int argNum = 0) const;
// Obtain attribute as string, with optional to-lower transform
// Return false if it cannot be obtained
bool getString(TString& value, int argNum = 0, bool convertToLower = true) const;
// How many arguments were provided to the attribute?
int size() const;
protected:
const TConstUnion* getConstUnion(TBasicType basicType, int argNum) const;
};
typedef TList<TAttributeArgs> TAttributes;
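// Illustrative sketch of reading a loop attribute such as [[unroll(4)]] once
// the front end has built a TAttributes list: look for EatUnroll and pull its
// first argument with getInt(). The surrounding control-flow handling is
// omitted; this only shows the query pattern declared above. Compiled out
// with #if 0.
#if 0
inline int GetUnrollCount(const TAttributes& attributes)
{
    for (TAttributes::const_iterator it = attributes.begin(); it != attributes.end(); ++it) {
        if (it->name == EatUnroll) {
            int count = 0;
            if (it->getInt(count))     // false if the attribute had no integer argument
                return count;
        }
    }
    return 0;                          // 0: no unroll request found
}
#endif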
} // end namespace glslang
#endif // _ATTRIBUTE_INCLUDED_


@ -0,0 +1,218 @@
/*
** Copyright (c) 2013 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a
** copy of this software and/or associated documentation files (the
** "Materials"), to deal in the Materials without restriction, including
** without limitation the rights to use, copy, modify, merge, publish,
** distribute, sublicense, and/or sell copies of the Materials, and to
** permit persons to whom the Materials are furnished to do so, subject to
** the following conditions:
**
** The above copyright notice and this permission notice shall be included
** in all copies or substantial portions of the Materials.
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
#pragma once
#define GL_FLOAT 0x1406
#define GL_FLOAT_VEC2 0x8B50
#define GL_FLOAT_VEC3 0x8B51
#define GL_FLOAT_VEC4 0x8B52
#define GL_DOUBLE 0x140A
#define GL_DOUBLE_VEC2 0x8FFC
#define GL_DOUBLE_VEC3 0x8FFD
#define GL_DOUBLE_VEC4 0x8FFE
#define GL_INT 0x1404
#define GL_INT_VEC2 0x8B53
#define GL_INT_VEC3 0x8B54
#define GL_INT_VEC4 0x8B55
#define GL_UNSIGNED_INT 0x1405
#define GL_UNSIGNED_INT_VEC2 0x8DC6
#define GL_UNSIGNED_INT_VEC3 0x8DC7
#define GL_UNSIGNED_INT_VEC4 0x8DC8
#define GL_INT64_ARB 0x140E
#define GL_INT64_VEC2_ARB 0x8FE9
#define GL_INT64_VEC3_ARB 0x8FEA
#define GL_INT64_VEC4_ARB 0x8FEB
#define GL_UNSIGNED_INT64_ARB 0x140F
#define GL_UNSIGNED_INT64_VEC2_ARB 0x8FF5
#define GL_UNSIGNED_INT64_VEC3_ARB 0x8FF6
#define GL_UNSIGNED_INT64_VEC4_ARB 0x8FF7
#define GL_UNSIGNED_INT16_VEC2_NV 0x8FF1
#define GL_UNSIGNED_INT16_VEC3_NV 0x8FF2
#define GL_UNSIGNED_INT16_VEC4_NV 0x8FF3
#define GL_INT16_NV 0x8FE4
#define GL_INT16_VEC2_NV 0x8FE5
#define GL_INT16_VEC3_NV 0x8FE6
#define GL_INT16_VEC4_NV 0x8FE7
#define GL_BOOL 0x8B56
#define GL_BOOL_VEC2 0x8B57
#define GL_BOOL_VEC3 0x8B58
#define GL_BOOL_VEC4 0x8B59
#define GL_FLOAT_MAT2 0x8B5A
#define GL_FLOAT_MAT3 0x8B5B
#define GL_FLOAT_MAT4 0x8B5C
#define GL_FLOAT_MAT2x3 0x8B65
#define GL_FLOAT_MAT2x4 0x8B66
#define GL_FLOAT_MAT3x2 0x8B67
#define GL_FLOAT_MAT3x4 0x8B68
#define GL_FLOAT_MAT4x2 0x8B69
#define GL_FLOAT_MAT4x3 0x8B6A
#define GL_DOUBLE_MAT2 0x8F46
#define GL_DOUBLE_MAT3 0x8F47
#define GL_DOUBLE_MAT4 0x8F48
#define GL_DOUBLE_MAT2x3 0x8F49
#define GL_DOUBLE_MAT2x4 0x8F4A
#define GL_DOUBLE_MAT3x2 0x8F4B
#define GL_DOUBLE_MAT3x4 0x8F4C
#define GL_DOUBLE_MAT4x2 0x8F4D
#define GL_DOUBLE_MAT4x3 0x8F4E
// Those constants are borrowed from extension NV_gpu_shader5
#define GL_FLOAT16_NV 0x8FF8
#define GL_FLOAT16_VEC2_NV 0x8FF9
#define GL_FLOAT16_VEC3_NV 0x8FFA
#define GL_FLOAT16_VEC4_NV 0x8FFB
#define GL_FLOAT16_MAT2_AMD 0x91C5
#define GL_FLOAT16_MAT3_AMD 0x91C6
#define GL_FLOAT16_MAT4_AMD 0x91C7
#define GL_FLOAT16_MAT2x3_AMD 0x91C8
#define GL_FLOAT16_MAT2x4_AMD 0x91C9
#define GL_FLOAT16_MAT3x2_AMD 0x91CA
#define GL_FLOAT16_MAT3x4_AMD 0x91CB
#define GL_FLOAT16_MAT4x2_AMD 0x91CC
#define GL_FLOAT16_MAT4x3_AMD 0x91CD
#define GL_SAMPLER_1D 0x8B5D
#define GL_SAMPLER_2D 0x8B5E
#define GL_SAMPLER_3D 0x8B5F
#define GL_SAMPLER_CUBE 0x8B60
#define GL_SAMPLER_BUFFER 0x8DC2
#define GL_SAMPLER_1D_ARRAY 0x8DC0
#define GL_SAMPLER_2D_ARRAY 0x8DC1
#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3
#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4
#define GL_SAMPLER_CUBE_SHADOW 0x8DC5
#define GL_SAMPLER_1D_SHADOW 0x8B61
#define GL_SAMPLER_2D_SHADOW 0x8B62
#define GL_SAMPLER_2D_RECT 0x8B63
#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64
#define GL_SAMPLER_2D_MULTISAMPLE 0x9108
#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B
#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C
#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D
#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C
#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D
#define GL_FLOAT16_SAMPLER_1D_AMD 0x91CE
#define GL_FLOAT16_SAMPLER_2D_AMD 0x91CF
#define GL_FLOAT16_SAMPLER_3D_AMD 0x91D0
#define GL_FLOAT16_SAMPLER_CUBE_AMD 0x91D1
#define GL_FLOAT16_SAMPLER_2D_RECT_AMD 0x91D2
#define GL_FLOAT16_SAMPLER_1D_ARRAY_AMD 0x91D3
#define GL_FLOAT16_SAMPLER_2D_ARRAY_AMD 0x91D4
#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD 0x91D5
#define GL_FLOAT16_SAMPLER_BUFFER_AMD 0x91D6
#define GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD 0x91D7
#define GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD 0x91D8
#define GL_FLOAT16_SAMPLER_1D_SHADOW_AMD 0x91D9
#define GL_FLOAT16_SAMPLER_2D_SHADOW_AMD 0x91DA
#define GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD 0x91DB
#define GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD 0x91DC
#define GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD 0x91DD
#define GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD 0x91DE
#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD 0x91DF
#define GL_FLOAT16_IMAGE_1D_AMD 0x91E0
#define GL_FLOAT16_IMAGE_2D_AMD 0x91E1
#define GL_FLOAT16_IMAGE_3D_AMD 0x91E2
#define GL_FLOAT16_IMAGE_2D_RECT_AMD 0x91E3
#define GL_FLOAT16_IMAGE_CUBE_AMD 0x91E4
#define GL_FLOAT16_IMAGE_1D_ARRAY_AMD 0x91E5
#define GL_FLOAT16_IMAGE_2D_ARRAY_AMD 0x91E6
#define GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD 0x91E7
#define GL_FLOAT16_IMAGE_BUFFER_AMD 0x91E8
#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD 0x91E9
#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD 0x91EA
#define GL_INT_SAMPLER_1D 0x8DC9
#define GL_INT_SAMPLER_2D 0x8DCA
#define GL_INT_SAMPLER_3D 0x8DCB
#define GL_INT_SAMPLER_CUBE 0x8DCC
#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE
#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF
#define GL_INT_SAMPLER_2D_RECT 0x8DCD
#define GL_INT_SAMPLER_BUFFER 0x8DD0
#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109
#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C
#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E
#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900E
#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1
#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2
#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3
#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4
#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6
#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7
#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5
#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8
#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D
#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F
#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900F
#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A
#define GL_IMAGE_1D 0x904C
#define GL_IMAGE_2D 0x904D
#define GL_IMAGE_3D 0x904E
#define GL_IMAGE_2D_RECT 0x904F
#define GL_IMAGE_CUBE 0x9050
#define GL_IMAGE_BUFFER 0x9051
#define GL_IMAGE_1D_ARRAY 0x9052
#define GL_IMAGE_2D_ARRAY 0x9053
#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054
#define GL_IMAGE_2D_MULTISAMPLE 0x9055
#define GL_IMAGE_2D_MULTISAMPLE_ARRAY 0x9056
#define GL_INT_IMAGE_1D 0x9057
#define GL_INT_IMAGE_2D 0x9058
#define GL_INT_IMAGE_3D 0x9059
#define GL_INT_IMAGE_2D_RECT 0x905A
#define GL_INT_IMAGE_CUBE 0x905B
#define GL_INT_IMAGE_BUFFER 0x905C
#define GL_INT_IMAGE_1D_ARRAY 0x905D
#define GL_INT_IMAGE_2D_ARRAY 0x905E
#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F
#define GL_INT_IMAGE_2D_MULTISAMPLE 0x9060
#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x9061
#define GL_UNSIGNED_INT_IMAGE_1D 0x9062
#define GL_UNSIGNED_INT_IMAGE_2D 0x9063
#define GL_UNSIGNED_INT_IMAGE_3D 0x9064
#define GL_UNSIGNED_INT_IMAGE_2D_RECT 0x9065
#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066
#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067
#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY 0x9068
#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069
#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A
#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE 0x906B
#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x906C
#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB


@ -0,0 +1,568 @@
/* A Bison parser, made by GNU Bison 3.7.4. */
/* Bison interface for Yacc-like parsers in C
Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2020 Free Software Foundation,
Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* DO NOT RELY ON FEATURES THAT ARE NOT DOCUMENTED in the manual,
especially those whose name start with YY_ or yy_. They are
private implementation details that can be changed or removed. */
#ifndef YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
# define YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
/* Debug traces. */
#ifndef YYDEBUG
# define YYDEBUG 1
#endif
#if YYDEBUG
extern int yydebug;
#endif
/* Token kinds. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
enum yytokentype
{
YYEMPTY = -2,
YYEOF = 0, /* "end of file" */
YYerror = 256, /* error */
YYUNDEF = 257, /* "invalid token" */
CONST = 258, /* CONST */
BOOL = 259, /* BOOL */
INT = 260, /* INT */
UINT = 261, /* UINT */
FLOAT = 262, /* FLOAT */
BVEC2 = 263, /* BVEC2 */
BVEC3 = 264, /* BVEC3 */
BVEC4 = 265, /* BVEC4 */
IVEC2 = 266, /* IVEC2 */
IVEC3 = 267, /* IVEC3 */
IVEC4 = 268, /* IVEC4 */
UVEC2 = 269, /* UVEC2 */
UVEC3 = 270, /* UVEC3 */
UVEC4 = 271, /* UVEC4 */
VEC2 = 272, /* VEC2 */
VEC3 = 273, /* VEC3 */
VEC4 = 274, /* VEC4 */
MAT2 = 275, /* MAT2 */
MAT3 = 276, /* MAT3 */
MAT4 = 277, /* MAT4 */
MAT2X2 = 278, /* MAT2X2 */
MAT2X3 = 279, /* MAT2X3 */
MAT2X4 = 280, /* MAT2X4 */
MAT3X2 = 281, /* MAT3X2 */
MAT3X3 = 282, /* MAT3X3 */
MAT3X4 = 283, /* MAT3X4 */
MAT4X2 = 284, /* MAT4X2 */
MAT4X3 = 285, /* MAT4X3 */
MAT4X4 = 286, /* MAT4X4 */
SAMPLER2D = 287, /* SAMPLER2D */
SAMPLER3D = 288, /* SAMPLER3D */
SAMPLERCUBE = 289, /* SAMPLERCUBE */
SAMPLER2DSHADOW = 290, /* SAMPLER2DSHADOW */
SAMPLERCUBESHADOW = 291, /* SAMPLERCUBESHADOW */
SAMPLER2DARRAY = 292, /* SAMPLER2DARRAY */
SAMPLER2DARRAYSHADOW = 293, /* SAMPLER2DARRAYSHADOW */
ISAMPLER2D = 294, /* ISAMPLER2D */
ISAMPLER3D = 295, /* ISAMPLER3D */
ISAMPLERCUBE = 296, /* ISAMPLERCUBE */
ISAMPLER2DARRAY = 297, /* ISAMPLER2DARRAY */
USAMPLER2D = 298, /* USAMPLER2D */
USAMPLER3D = 299, /* USAMPLER3D */
USAMPLERCUBE = 300, /* USAMPLERCUBE */
USAMPLER2DARRAY = 301, /* USAMPLER2DARRAY */
SAMPLER = 302, /* SAMPLER */
SAMPLERSHADOW = 303, /* SAMPLERSHADOW */
TEXTURE2D = 304, /* TEXTURE2D */
TEXTURE3D = 305, /* TEXTURE3D */
TEXTURECUBE = 306, /* TEXTURECUBE */
TEXTURE2DARRAY = 307, /* TEXTURE2DARRAY */
ITEXTURE2D = 308, /* ITEXTURE2D */
ITEXTURE3D = 309, /* ITEXTURE3D */
ITEXTURECUBE = 310, /* ITEXTURECUBE */
ITEXTURE2DARRAY = 311, /* ITEXTURE2DARRAY */
UTEXTURE2D = 312, /* UTEXTURE2D */
UTEXTURE3D = 313, /* UTEXTURE3D */
UTEXTURECUBE = 314, /* UTEXTURECUBE */
UTEXTURE2DARRAY = 315, /* UTEXTURE2DARRAY */
ATTRIBUTE = 316, /* ATTRIBUTE */
VARYING = 317, /* VARYING */
FLOAT16_T = 318, /* FLOAT16_T */
FLOAT32_T = 319, /* FLOAT32_T */
DOUBLE = 320, /* DOUBLE */
FLOAT64_T = 321, /* FLOAT64_T */
INT64_T = 322, /* INT64_T */
UINT64_T = 323, /* UINT64_T */
INT32_T = 324, /* INT32_T */
UINT32_T = 325, /* UINT32_T */
INT16_T = 326, /* INT16_T */
UINT16_T = 327, /* UINT16_T */
INT8_T = 328, /* INT8_T */
UINT8_T = 329, /* UINT8_T */
I64VEC2 = 330, /* I64VEC2 */
I64VEC3 = 331, /* I64VEC3 */
I64VEC4 = 332, /* I64VEC4 */
U64VEC2 = 333, /* U64VEC2 */
U64VEC3 = 334, /* U64VEC3 */
U64VEC4 = 335, /* U64VEC4 */
I32VEC2 = 336, /* I32VEC2 */
I32VEC3 = 337, /* I32VEC3 */
I32VEC4 = 338, /* I32VEC4 */
U32VEC2 = 339, /* U32VEC2 */
U32VEC3 = 340, /* U32VEC3 */
U32VEC4 = 341, /* U32VEC4 */
I16VEC2 = 342, /* I16VEC2 */
I16VEC3 = 343, /* I16VEC3 */
I16VEC4 = 344, /* I16VEC4 */
U16VEC2 = 345, /* U16VEC2 */
U16VEC3 = 346, /* U16VEC3 */
U16VEC4 = 347, /* U16VEC4 */
I8VEC2 = 348, /* I8VEC2 */
I8VEC3 = 349, /* I8VEC3 */
I8VEC4 = 350, /* I8VEC4 */
U8VEC2 = 351, /* U8VEC2 */
U8VEC3 = 352, /* U8VEC3 */
U8VEC4 = 353, /* U8VEC4 */
DVEC2 = 354, /* DVEC2 */
DVEC3 = 355, /* DVEC3 */
DVEC4 = 356, /* DVEC4 */
DMAT2 = 357, /* DMAT2 */
DMAT3 = 358, /* DMAT3 */
DMAT4 = 359, /* DMAT4 */
F16VEC2 = 360, /* F16VEC2 */
F16VEC3 = 361, /* F16VEC3 */
F16VEC4 = 362, /* F16VEC4 */
F16MAT2 = 363, /* F16MAT2 */
F16MAT3 = 364, /* F16MAT3 */
F16MAT4 = 365, /* F16MAT4 */
F32VEC2 = 366, /* F32VEC2 */
F32VEC3 = 367, /* F32VEC3 */
F32VEC4 = 368, /* F32VEC4 */
F32MAT2 = 369, /* F32MAT2 */
F32MAT3 = 370, /* F32MAT3 */
F32MAT4 = 371, /* F32MAT4 */
F64VEC2 = 372, /* F64VEC2 */
F64VEC3 = 373, /* F64VEC3 */
F64VEC4 = 374, /* F64VEC4 */
F64MAT2 = 375, /* F64MAT2 */
F64MAT3 = 376, /* F64MAT3 */
F64MAT4 = 377, /* F64MAT4 */
DMAT2X2 = 378, /* DMAT2X2 */
DMAT2X3 = 379, /* DMAT2X3 */
DMAT2X4 = 380, /* DMAT2X4 */
DMAT3X2 = 381, /* DMAT3X2 */
DMAT3X3 = 382, /* DMAT3X3 */
DMAT3X4 = 383, /* DMAT3X4 */
DMAT4X2 = 384, /* DMAT4X2 */
DMAT4X3 = 385, /* DMAT4X3 */
DMAT4X4 = 386, /* DMAT4X4 */
F16MAT2X2 = 387, /* F16MAT2X2 */
F16MAT2X3 = 388, /* F16MAT2X3 */
F16MAT2X4 = 389, /* F16MAT2X4 */
F16MAT3X2 = 390, /* F16MAT3X2 */
F16MAT3X3 = 391, /* F16MAT3X3 */
F16MAT3X4 = 392, /* F16MAT3X4 */
F16MAT4X2 = 393, /* F16MAT4X2 */
F16MAT4X3 = 394, /* F16MAT4X3 */
F16MAT4X4 = 395, /* F16MAT4X4 */
F32MAT2X2 = 396, /* F32MAT2X2 */
F32MAT2X3 = 397, /* F32MAT2X3 */
F32MAT2X4 = 398, /* F32MAT2X4 */
F32MAT3X2 = 399, /* F32MAT3X2 */
F32MAT3X3 = 400, /* F32MAT3X3 */
F32MAT3X4 = 401, /* F32MAT3X4 */
F32MAT4X2 = 402, /* F32MAT4X2 */
F32MAT4X3 = 403, /* F32MAT4X3 */
F32MAT4X4 = 404, /* F32MAT4X4 */
F64MAT2X2 = 405, /* F64MAT2X2 */
F64MAT2X3 = 406, /* F64MAT2X3 */
F64MAT2X4 = 407, /* F64MAT2X4 */
F64MAT3X2 = 408, /* F64MAT3X2 */
F64MAT3X3 = 409, /* F64MAT3X3 */
F64MAT3X4 = 410, /* F64MAT3X4 */
F64MAT4X2 = 411, /* F64MAT4X2 */
F64MAT4X3 = 412, /* F64MAT4X3 */
F64MAT4X4 = 413, /* F64MAT4X4 */
ATOMIC_UINT = 414, /* ATOMIC_UINT */
ACCSTRUCTNV = 415, /* ACCSTRUCTNV */
ACCSTRUCTEXT = 416, /* ACCSTRUCTEXT */
RAYQUERYEXT = 417, /* RAYQUERYEXT */
FCOOPMATNV = 418, /* FCOOPMATNV */
ICOOPMATNV = 419, /* ICOOPMATNV */
UCOOPMATNV = 420, /* UCOOPMATNV */
SAMPLERCUBEARRAY = 421, /* SAMPLERCUBEARRAY */
SAMPLERCUBEARRAYSHADOW = 422, /* SAMPLERCUBEARRAYSHADOW */
ISAMPLERCUBEARRAY = 423, /* ISAMPLERCUBEARRAY */
USAMPLERCUBEARRAY = 424, /* USAMPLERCUBEARRAY */
SAMPLER1D = 425, /* SAMPLER1D */
SAMPLER1DARRAY = 426, /* SAMPLER1DARRAY */
SAMPLER1DARRAYSHADOW = 427, /* SAMPLER1DARRAYSHADOW */
ISAMPLER1D = 428, /* ISAMPLER1D */
SAMPLER1DSHADOW = 429, /* SAMPLER1DSHADOW */
SAMPLER2DRECT = 430, /* SAMPLER2DRECT */
SAMPLER2DRECTSHADOW = 431, /* SAMPLER2DRECTSHADOW */
ISAMPLER2DRECT = 432, /* ISAMPLER2DRECT */
USAMPLER2DRECT = 433, /* USAMPLER2DRECT */
SAMPLERBUFFER = 434, /* SAMPLERBUFFER */
ISAMPLERBUFFER = 435, /* ISAMPLERBUFFER */
USAMPLERBUFFER = 436, /* USAMPLERBUFFER */
SAMPLER2DMS = 437, /* SAMPLER2DMS */
ISAMPLER2DMS = 438, /* ISAMPLER2DMS */
USAMPLER2DMS = 439, /* USAMPLER2DMS */
SAMPLER2DMSARRAY = 440, /* SAMPLER2DMSARRAY */
ISAMPLER2DMSARRAY = 441, /* ISAMPLER2DMSARRAY */
USAMPLER2DMSARRAY = 442, /* USAMPLER2DMSARRAY */
SAMPLEREXTERNALOES = 443, /* SAMPLEREXTERNALOES */
SAMPLEREXTERNAL2DY2YEXT = 444, /* SAMPLEREXTERNAL2DY2YEXT */
ISAMPLER1DARRAY = 445, /* ISAMPLER1DARRAY */
USAMPLER1D = 446, /* USAMPLER1D */
USAMPLER1DARRAY = 447, /* USAMPLER1DARRAY */
F16SAMPLER1D = 448, /* F16SAMPLER1D */
F16SAMPLER2D = 449, /* F16SAMPLER2D */
F16SAMPLER3D = 450, /* F16SAMPLER3D */
F16SAMPLER2DRECT = 451, /* F16SAMPLER2DRECT */
F16SAMPLERCUBE = 452, /* F16SAMPLERCUBE */
F16SAMPLER1DARRAY = 453, /* F16SAMPLER1DARRAY */
F16SAMPLER2DARRAY = 454, /* F16SAMPLER2DARRAY */
F16SAMPLERCUBEARRAY = 455, /* F16SAMPLERCUBEARRAY */
F16SAMPLERBUFFER = 456, /* F16SAMPLERBUFFER */
F16SAMPLER2DMS = 457, /* F16SAMPLER2DMS */
F16SAMPLER2DMSARRAY = 458, /* F16SAMPLER2DMSARRAY */
F16SAMPLER1DSHADOW = 459, /* F16SAMPLER1DSHADOW */
F16SAMPLER2DSHADOW = 460, /* F16SAMPLER2DSHADOW */
F16SAMPLER1DARRAYSHADOW = 461, /* F16SAMPLER1DARRAYSHADOW */
F16SAMPLER2DARRAYSHADOW = 462, /* F16SAMPLER2DARRAYSHADOW */
F16SAMPLER2DRECTSHADOW = 463, /* F16SAMPLER2DRECTSHADOW */
F16SAMPLERCUBESHADOW = 464, /* F16SAMPLERCUBESHADOW */
F16SAMPLERCUBEARRAYSHADOW = 465, /* F16SAMPLERCUBEARRAYSHADOW */
IMAGE1D = 466, /* IMAGE1D */
IIMAGE1D = 467, /* IIMAGE1D */
UIMAGE1D = 468, /* UIMAGE1D */
IMAGE2D = 469, /* IMAGE2D */
IIMAGE2D = 470, /* IIMAGE2D */
UIMAGE2D = 471, /* UIMAGE2D */
IMAGE3D = 472, /* IMAGE3D */
IIMAGE3D = 473, /* IIMAGE3D */
UIMAGE3D = 474, /* UIMAGE3D */
IMAGE2DRECT = 475, /* IMAGE2DRECT */
IIMAGE2DRECT = 476, /* IIMAGE2DRECT */
UIMAGE2DRECT = 477, /* UIMAGE2DRECT */
IMAGECUBE = 478, /* IMAGECUBE */
IIMAGECUBE = 479, /* IIMAGECUBE */
UIMAGECUBE = 480, /* UIMAGECUBE */
IMAGEBUFFER = 481, /* IMAGEBUFFER */
IIMAGEBUFFER = 482, /* IIMAGEBUFFER */
UIMAGEBUFFER = 483, /* UIMAGEBUFFER */
IMAGE1DARRAY = 484, /* IMAGE1DARRAY */
IIMAGE1DARRAY = 485, /* IIMAGE1DARRAY */
UIMAGE1DARRAY = 486, /* UIMAGE1DARRAY */
IMAGE2DARRAY = 487, /* IMAGE2DARRAY */
IIMAGE2DARRAY = 488, /* IIMAGE2DARRAY */
UIMAGE2DARRAY = 489, /* UIMAGE2DARRAY */
IMAGECUBEARRAY = 490, /* IMAGECUBEARRAY */
IIMAGECUBEARRAY = 491, /* IIMAGECUBEARRAY */
UIMAGECUBEARRAY = 492, /* UIMAGECUBEARRAY */
IMAGE2DMS = 493, /* IMAGE2DMS */
IIMAGE2DMS = 494, /* IIMAGE2DMS */
UIMAGE2DMS = 495, /* UIMAGE2DMS */
IMAGE2DMSARRAY = 496, /* IMAGE2DMSARRAY */
IIMAGE2DMSARRAY = 497, /* IIMAGE2DMSARRAY */
UIMAGE2DMSARRAY = 498, /* UIMAGE2DMSARRAY */
F16IMAGE1D = 499, /* F16IMAGE1D */
F16IMAGE2D = 500, /* F16IMAGE2D */
F16IMAGE3D = 501, /* F16IMAGE3D */
F16IMAGE2DRECT = 502, /* F16IMAGE2DRECT */
F16IMAGECUBE = 503, /* F16IMAGECUBE */
F16IMAGE1DARRAY = 504, /* F16IMAGE1DARRAY */
F16IMAGE2DARRAY = 505, /* F16IMAGE2DARRAY */
F16IMAGECUBEARRAY = 506, /* F16IMAGECUBEARRAY */
F16IMAGEBUFFER = 507, /* F16IMAGEBUFFER */
F16IMAGE2DMS = 508, /* F16IMAGE2DMS */
F16IMAGE2DMSARRAY = 509, /* F16IMAGE2DMSARRAY */
I64IMAGE1D = 510, /* I64IMAGE1D */
U64IMAGE1D = 511, /* U64IMAGE1D */
I64IMAGE2D = 512, /* I64IMAGE2D */
U64IMAGE2D = 513, /* U64IMAGE2D */
I64IMAGE3D = 514, /* I64IMAGE3D */
U64IMAGE3D = 515, /* U64IMAGE3D */
I64IMAGE2DRECT = 516, /* I64IMAGE2DRECT */
U64IMAGE2DRECT = 517, /* U64IMAGE2DRECT */
I64IMAGECUBE = 518, /* I64IMAGECUBE */
U64IMAGECUBE = 519, /* U64IMAGECUBE */
I64IMAGEBUFFER = 520, /* I64IMAGEBUFFER */
U64IMAGEBUFFER = 521, /* U64IMAGEBUFFER */
I64IMAGE1DARRAY = 522, /* I64IMAGE1DARRAY */
U64IMAGE1DARRAY = 523, /* U64IMAGE1DARRAY */
I64IMAGE2DARRAY = 524, /* I64IMAGE2DARRAY */
U64IMAGE2DARRAY = 525, /* U64IMAGE2DARRAY */
I64IMAGECUBEARRAY = 526, /* I64IMAGECUBEARRAY */
U64IMAGECUBEARRAY = 527, /* U64IMAGECUBEARRAY */
I64IMAGE2DMS = 528, /* I64IMAGE2DMS */
U64IMAGE2DMS = 529, /* U64IMAGE2DMS */
I64IMAGE2DMSARRAY = 530, /* I64IMAGE2DMSARRAY */
U64IMAGE2DMSARRAY = 531, /* U64IMAGE2DMSARRAY */
TEXTURECUBEARRAY = 532, /* TEXTURECUBEARRAY */
ITEXTURECUBEARRAY = 533, /* ITEXTURECUBEARRAY */
UTEXTURECUBEARRAY = 534, /* UTEXTURECUBEARRAY */
TEXTURE1D = 535, /* TEXTURE1D */
ITEXTURE1D = 536, /* ITEXTURE1D */
UTEXTURE1D = 537, /* UTEXTURE1D */
TEXTURE1DARRAY = 538, /* TEXTURE1DARRAY */
ITEXTURE1DARRAY = 539, /* ITEXTURE1DARRAY */
UTEXTURE1DARRAY = 540, /* UTEXTURE1DARRAY */
TEXTURE2DRECT = 541, /* TEXTURE2DRECT */
ITEXTURE2DRECT = 542, /* ITEXTURE2DRECT */
UTEXTURE2DRECT = 543, /* UTEXTURE2DRECT */
TEXTUREBUFFER = 544, /* TEXTUREBUFFER */
ITEXTUREBUFFER = 545, /* ITEXTUREBUFFER */
UTEXTUREBUFFER = 546, /* UTEXTUREBUFFER */
TEXTURE2DMS = 547, /* TEXTURE2DMS */
ITEXTURE2DMS = 548, /* ITEXTURE2DMS */
UTEXTURE2DMS = 549, /* UTEXTURE2DMS */
TEXTURE2DMSARRAY = 550, /* TEXTURE2DMSARRAY */
ITEXTURE2DMSARRAY = 551, /* ITEXTURE2DMSARRAY */
UTEXTURE2DMSARRAY = 552, /* UTEXTURE2DMSARRAY */
F16TEXTURE1D = 553, /* F16TEXTURE1D */
F16TEXTURE2D = 554, /* F16TEXTURE2D */
F16TEXTURE3D = 555, /* F16TEXTURE3D */
F16TEXTURE2DRECT = 556, /* F16TEXTURE2DRECT */
F16TEXTURECUBE = 557, /* F16TEXTURECUBE */
F16TEXTURE1DARRAY = 558, /* F16TEXTURE1DARRAY */
F16TEXTURE2DARRAY = 559, /* F16TEXTURE2DARRAY */
F16TEXTURECUBEARRAY = 560, /* F16TEXTURECUBEARRAY */
F16TEXTUREBUFFER = 561, /* F16TEXTUREBUFFER */
F16TEXTURE2DMS = 562, /* F16TEXTURE2DMS */
F16TEXTURE2DMSARRAY = 563, /* F16TEXTURE2DMSARRAY */
SUBPASSINPUT = 564, /* SUBPASSINPUT */
SUBPASSINPUTMS = 565, /* SUBPASSINPUTMS */
ISUBPASSINPUT = 566, /* ISUBPASSINPUT */
ISUBPASSINPUTMS = 567, /* ISUBPASSINPUTMS */
USUBPASSINPUT = 568, /* USUBPASSINPUT */
USUBPASSINPUTMS = 569, /* USUBPASSINPUTMS */
F16SUBPASSINPUT = 570, /* F16SUBPASSINPUT */
F16SUBPASSINPUTMS = 571, /* F16SUBPASSINPUTMS */
SPIRV_INSTRUCTION = 572, /* SPIRV_INSTRUCTION */
SPIRV_EXECUTION_MODE = 573, /* SPIRV_EXECUTION_MODE */
SPIRV_EXECUTION_MODE_ID = 574, /* SPIRV_EXECUTION_MODE_ID */
SPIRV_DECORATE = 575, /* SPIRV_DECORATE */
SPIRV_DECORATE_ID = 576, /* SPIRV_DECORATE_ID */
SPIRV_DECORATE_STRING = 577, /* SPIRV_DECORATE_STRING */
SPIRV_TYPE = 578, /* SPIRV_TYPE */
SPIRV_STORAGE_CLASS = 579, /* SPIRV_STORAGE_CLASS */
SPIRV_BY_REFERENCE = 580, /* SPIRV_BY_REFERENCE */
SPIRV_LITERAL = 581, /* SPIRV_LITERAL */
LEFT_OP = 582, /* LEFT_OP */
RIGHT_OP = 583, /* RIGHT_OP */
INC_OP = 584, /* INC_OP */
DEC_OP = 585, /* DEC_OP */
LE_OP = 586, /* LE_OP */
GE_OP = 587, /* GE_OP */
EQ_OP = 588, /* EQ_OP */
NE_OP = 589, /* NE_OP */
AND_OP = 590, /* AND_OP */
OR_OP = 591, /* OR_OP */
XOR_OP = 592, /* XOR_OP */
MUL_ASSIGN = 593, /* MUL_ASSIGN */
DIV_ASSIGN = 594, /* DIV_ASSIGN */
ADD_ASSIGN = 595, /* ADD_ASSIGN */
MOD_ASSIGN = 596, /* MOD_ASSIGN */
LEFT_ASSIGN = 597, /* LEFT_ASSIGN */
RIGHT_ASSIGN = 598, /* RIGHT_ASSIGN */
AND_ASSIGN = 599, /* AND_ASSIGN */
XOR_ASSIGN = 600, /* XOR_ASSIGN */
OR_ASSIGN = 601, /* OR_ASSIGN */
SUB_ASSIGN = 602, /* SUB_ASSIGN */
STRING_LITERAL = 603, /* STRING_LITERAL */
LEFT_PAREN = 604, /* LEFT_PAREN */
RIGHT_PAREN = 605, /* RIGHT_PAREN */
LEFT_BRACKET = 606, /* LEFT_BRACKET */
RIGHT_BRACKET = 607, /* RIGHT_BRACKET */
LEFT_BRACE = 608, /* LEFT_BRACE */
RIGHT_BRACE = 609, /* RIGHT_BRACE */
DOT = 610, /* DOT */
COMMA = 611, /* COMMA */
COLON = 612, /* COLON */
EQUAL = 613, /* EQUAL */
SEMICOLON = 614, /* SEMICOLON */
BANG = 615, /* BANG */
DASH = 616, /* DASH */
TILDE = 617, /* TILDE */
PLUS = 618, /* PLUS */
STAR = 619, /* STAR */
SLASH = 620, /* SLASH */
PERCENT = 621, /* PERCENT */
LEFT_ANGLE = 622, /* LEFT_ANGLE */
RIGHT_ANGLE = 623, /* RIGHT_ANGLE */
VERTICAL_BAR = 624, /* VERTICAL_BAR */
CARET = 625, /* CARET */
AMPERSAND = 626, /* AMPERSAND */
QUESTION = 627, /* QUESTION */
INVARIANT = 628, /* INVARIANT */
HIGH_PRECISION = 629, /* HIGH_PRECISION */
MEDIUM_PRECISION = 630, /* MEDIUM_PRECISION */
LOW_PRECISION = 631, /* LOW_PRECISION */
PRECISION = 632, /* PRECISION */
PACKED = 633, /* PACKED */
RESOURCE = 634, /* RESOURCE */
SUPERP = 635, /* SUPERP */
FLOATCONSTANT = 636, /* FLOATCONSTANT */
INTCONSTANT = 637, /* INTCONSTANT */
UINTCONSTANT = 638, /* UINTCONSTANT */
BOOLCONSTANT = 639, /* BOOLCONSTANT */
IDENTIFIER = 640, /* IDENTIFIER */
TYPE_NAME = 641, /* TYPE_NAME */
CENTROID = 642, /* CENTROID */
IN = 643, /* IN */
OUT = 644, /* OUT */
INOUT = 645, /* INOUT */
STRUCT = 646, /* STRUCT */
VOID = 647, /* VOID */
WHILE = 648, /* WHILE */
BREAK = 649, /* BREAK */
CONTINUE = 650, /* CONTINUE */
DO = 651, /* DO */
ELSE = 652, /* ELSE */
FOR = 653, /* FOR */
IF = 654, /* IF */
DISCARD = 655, /* DISCARD */
RETURN = 656, /* RETURN */
SWITCH = 657, /* SWITCH */
CASE = 658, /* CASE */
DEFAULT = 659, /* DEFAULT */
TERMINATE_INVOCATION = 660, /* TERMINATE_INVOCATION */
TERMINATE_RAY = 661, /* TERMINATE_RAY */
IGNORE_INTERSECTION = 662, /* IGNORE_INTERSECTION */
UNIFORM = 663, /* UNIFORM */
SHARED = 664, /* SHARED */
BUFFER = 665, /* BUFFER */
FLAT = 666, /* FLAT */
SMOOTH = 667, /* SMOOTH */
LAYOUT = 668, /* LAYOUT */
DOUBLECONSTANT = 669, /* DOUBLECONSTANT */
INT16CONSTANT = 670, /* INT16CONSTANT */
UINT16CONSTANT = 671, /* UINT16CONSTANT */
FLOAT16CONSTANT = 672, /* FLOAT16CONSTANT */
INT32CONSTANT = 673, /* INT32CONSTANT */
UINT32CONSTANT = 674, /* UINT32CONSTANT */
INT64CONSTANT = 675, /* INT64CONSTANT */
UINT64CONSTANT = 676, /* UINT64CONSTANT */
SUBROUTINE = 677, /* SUBROUTINE */
DEMOTE = 678, /* DEMOTE */
PAYLOADNV = 679, /* PAYLOADNV */
PAYLOADINNV = 680, /* PAYLOADINNV */
HITATTRNV = 681, /* HITATTRNV */
CALLDATANV = 682, /* CALLDATANV */
CALLDATAINNV = 683, /* CALLDATAINNV */
PAYLOADEXT = 684, /* PAYLOADEXT */
PAYLOADINEXT = 685, /* PAYLOADINEXT */
HITATTREXT = 686, /* HITATTREXT */
CALLDATAEXT = 687, /* CALLDATAEXT */
CALLDATAINEXT = 688, /* CALLDATAINEXT */
PATCH = 689, /* PATCH */
SAMPLE = 690, /* SAMPLE */
NONUNIFORM = 691, /* NONUNIFORM */
COHERENT = 692, /* COHERENT */
VOLATILE = 693, /* VOLATILE */
RESTRICT = 694, /* RESTRICT */
READONLY = 695, /* READONLY */
WRITEONLY = 696, /* WRITEONLY */
DEVICECOHERENT = 697, /* DEVICECOHERENT */
QUEUEFAMILYCOHERENT = 698, /* QUEUEFAMILYCOHERENT */
WORKGROUPCOHERENT = 699, /* WORKGROUPCOHERENT */
SUBGROUPCOHERENT = 700, /* SUBGROUPCOHERENT */
NONPRIVATE = 701, /* NONPRIVATE */
SHADERCALLCOHERENT = 702, /* SHADERCALLCOHERENT */
NOPERSPECTIVE = 703, /* NOPERSPECTIVE */
EXPLICITINTERPAMD = 704, /* EXPLICITINTERPAMD */
PERVERTEXNV = 705, /* PERVERTEXNV */
PERPRIMITIVENV = 706, /* PERPRIMITIVENV */
PERVIEWNV = 707, /* PERVIEWNV */
PERTASKNV = 708, /* PERTASKNV */
PRECISE = 709 /* PRECISE */
};
typedef enum yytokentype yytoken_kind_t;
#endif
/* Value type. */
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
union YYSTYPE
{
#line 97 "MachineIndependent/glslang.y"
struct {
glslang::TSourceLoc loc;
union {
glslang::TString *string;
int i;
unsigned int u;
long long i64;
unsigned long long u64;
bool b;
double d;
};
glslang::TSymbol* symbol;
} lex;
struct {
glslang::TSourceLoc loc;
glslang::TOperator op;
union {
TIntermNode* intermNode;
glslang::TIntermNodePair nodePair;
glslang::TIntermTyped* intermTypedNode;
glslang::TAttributes* attributes;
glslang::TSpirvRequirement* spirvReq;
glslang::TSpirvInstruction* spirvInst;
glslang::TSpirvTypeParameters* spirvTypeParams;
};
union {
glslang::TPublicType type;
glslang::TFunction* function;
glslang::TParameter param;
glslang::TTypeLoc typeLine;
glslang::TTypeList* typeList;
glslang::TArraySizes* arraySizes;
glslang::TIdentifierList* identifierList;
};
glslang::TArraySizes* typeParameters;
} interm;
#line 557 "MachineIndependent/glslang_tab.cpp.h"
};
typedef union YYSTYPE YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define YYSTYPE_IS_DECLARED 1
#endif
int yyparse (glslang::TParseContext* pParseContext);
#endif /* !YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED */

View File

@ -0,0 +1,361 @@
//
// Copyright (C) 2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
#ifndef _IOMAPPER_INCLUDED
#define _IOMAPPER_INCLUDED
#include <cstdint>
#include "LiveTraverser.h"
#include <unordered_map>
#include <unordered_set>
//
// A reflection database and its interface, consistent with the OpenGL API reflection queries.
//
class TInfoSink;
namespace glslang {
class TIntermediate;
struct TVarEntryInfo {
long long id;
TIntermSymbol* symbol;
bool live;
int newBinding;
int newSet;
int newLocation;
int newComponent;
int newIndex;
EShLanguage stage;
void clearNewAssignments() {
newBinding = -1;
newSet = -1;
newLocation = -1;
newComponent = -1;
newIndex = -1;
}
struct TOrderById {
inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r) { return l.id < r.id; }
};
struct TOrderByPriority {
// ordering:
// 1) has both binding and set
// 2) has binding but no set
// 3) has no binding but set
// 4) has no binding and no set
inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r) {
const TQualifier& lq = l.symbol->getQualifier();
const TQualifier& rq = r.symbol->getQualifier();
// simple rules:
// has binding gives 2 points
// has set gives 1 point
// whoever has the most points is more important.
int lPoints = (lq.hasBinding() ? 2 : 0) + (lq.hasSet() ? 1 : 0);
int rPoints = (rq.hasBinding() ? 2 : 0) + (rq.hasSet() ? 1 : 0);
if (lPoints == rPoints)
return l.id < r.id;
return lPoints > rPoints;
}
};
struct TOrderByPriorityAndLive {
// ordering:
// 1) do live variables first
// 2) has both binding and set
// 3) has binding but no set
// 4) has no binding but set
// 5) has no binding and no set
inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r) {
const TQualifier& lq = l.symbol->getQualifier();
const TQualifier& rq = r.symbol->getQualifier();
// simple rules:
// has binding gives 2 points
// has set gives 1 point
// whoever has the most points is more important.
int lPoints = (lq.hasBinding() ? 2 : 0) + (lq.hasSet() ? 1 : 0);
int rPoints = (rq.hasBinding() ? 2 : 0) + (rq.hasSet() ? 1 : 0);
if (l.live != r.live)
return l.live > r.live;
if (lPoints != rPoints)
return lPoints > rPoints;
return l.id < r.id;
}
};
};
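// A minimal standalone sketch of the scoring rule the comparators above apply
// (has binding = 2 points, has set = 1 point, liveness wins first), using a
// simplified stand-in struct rather than the real TVarEntryInfo/TQualifier
// types; illustrative only, not part of glslang.
#include <algorithm>
#include <cstdio>
#include <vector>

struct Entry {              // simplified stand-in for TVarEntryInfo
    long long id;
    bool live;
    bool hasBinding;
    bool hasSet;
};

static int points(const Entry& e) { return (e.hasBinding ? 2 : 0) + (e.hasSet ? 1 : 0); }

// Mirrors TOrderByPriorityAndLive: live entries first, then by points, then by id.
static bool orderByPriorityAndLive(const Entry& l, const Entry& r) {
    if (l.live != r.live)
        return l.live > r.live;
    const int lPoints = points(l);
    const int rPoints = points(r);
    if (lPoints != rPoints)
        return lPoints > rPoints;
    return l.id < r.id;
}

int main() {
    std::vector<Entry> vars = {
        { 1, false, true,  true  },  // binding and set, but not live
        { 2, true,  false, false },  // live, no binding or set
        { 3, true,  true,  false },  // live, binding only
    };
    std::sort(vars.begin(), vars.end(), orderByPriorityAndLive);
    for (const Entry& e : vars)
        std::printf("id=%lld live=%d points=%d\n", e.id, (int)e.live, points(e));
    // Prints id=3, id=2, id=1: live entries first, higher score next, id as tie-break.
}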
// Base class for shared TIoMapResolver services, used by several derivations.
struct TDefaultIoResolverBase : public glslang::TIoMapResolver {
public:
TDefaultIoResolverBase(const TIntermediate& intermediate);
typedef std::vector<int> TSlotSet;
typedef std::unordered_map<int, TSlotSet> TSlotSetMap;
// grow the reflection stage by stage
void notifyBinding(EShLanguage, TVarEntryInfo& /*ent*/) override {}
void notifyInOut(EShLanguage, TVarEntryInfo& /*ent*/) override {}
void beginNotifications(EShLanguage) override {}
void endNotifications(EShLanguage) override {}
void beginResolve(EShLanguage) override {}
void endResolve(EShLanguage) override {}
void beginCollect(EShLanguage) override {}
void endCollect(EShLanguage) override {}
void reserverResourceSlot(TVarEntryInfo& /*ent*/, TInfoSink& /*infoSink*/) override {}
void reserverStorageSlot(TVarEntryInfo& /*ent*/, TInfoSink& /*infoSink*/) override {}
int getBaseBinding(EShLanguage stage, TResourceType res, unsigned int set) const;
const std::vector<std::string>& getResourceSetBinding(EShLanguage stage) const;
virtual TResourceType getResourceType(const glslang::TType& type) = 0;
bool doAutoBindingMapping() const;
bool doAutoLocationMapping() const;
TSlotSet::iterator findSlot(int set, int slot);
bool checkEmpty(int set, int slot);
bool validateInOut(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; }
int reserveSlot(int set, int slot, int size = 1);
int getFreeSlot(int set, int base, int size = 1);
int resolveSet(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) override;
int resolveInOutComponent(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveInOutIndex(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
void addStage(EShLanguage stage, TIntermediate& stageIntermediate) override {
if (stage < EShLangCount) {
stageMask[stage] = true;
stageIntermediates[stage] = &stageIntermediate;
}
}
uint32_t computeTypeLocationSize(const TType& type, EShLanguage stage);
TSlotSetMap slots;
bool hasError = false;
protected:
TDefaultIoResolverBase(TDefaultIoResolverBase&);
TDefaultIoResolverBase& operator=(TDefaultIoResolverBase&);
const TIntermediate& referenceIntermediate;
int nextUniformLocation;
int nextInputLocation;
int nextOutputLocation;
bool stageMask[EShLangCount + 1];
const TIntermediate* stageIntermediates[EShLangCount];
// Return descriptor set specific base if there is one, and the generic base otherwise.
int selectBaseBinding(int base, int descriptorSetBase) const {
return descriptorSetBase != -1 ? descriptorSetBase : base;
}
static int getLayoutSet(const glslang::TType& type) {
if (type.getQualifier().hasSet())
return type.getQualifier().layoutSet;
else
return 0;
}
static bool isSamplerType(const glslang::TType& type) {
return type.getBasicType() == glslang::EbtSampler && type.getSampler().isPureSampler();
}
static bool isTextureType(const glslang::TType& type) {
return (type.getBasicType() == glslang::EbtSampler &&
(type.getSampler().isTexture() || type.getSampler().isSubpass()));
}
static bool isUboType(const glslang::TType& type) {
return type.getQualifier().storage == EvqUniform;
}
static bool isImageType(const glslang::TType& type) {
return type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage();
}
static bool isSsboType(const glslang::TType& type) {
return type.getQualifier().storage == EvqBuffer;
}
// Return true if this is an SRV (shader resource view) type:
static bool isSrvType(const glslang::TType& type) {
return isTextureType(type) || type.getQualifier().storage == EvqBuffer;
}
// Return true if this is a UAV (unordered access view) type:
static bool isUavType(const glslang::TType& type) {
if (type.getQualifier().isReadOnly())
return false;
return (type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage()) ||
(type.getQualifier().storage == EvqBuffer);
}
};
// Default I/O resolver for OpenGL
struct TDefaultGlslIoResolver : public TDefaultIoResolverBase {
public:
typedef std::map<TString, int> TVarSlotMap; // <resourceName, location/binding>
typedef std::map<int, TVarSlotMap> TSlotMap; // <resourceKey, TVarSlotMap>
TDefaultGlslIoResolver(const TIntermediate& intermediate);
bool validateBinding(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; }
TResourceType getResourceType(const glslang::TType& type) override;
int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) override;
int resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
void beginResolve(EShLanguage /*stage*/) override;
void endResolve(EShLanguage stage) override;
void beginCollect(EShLanguage) override;
void endCollect(EShLanguage) override;
void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override;
void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override;
// In/out symbols and uniform symbols are stored in the same resourceSlotMap; a storage key is used to identify each kind of symbol.
// The key is built from the stage and the storage qualifier, which lets us recognize the same storage resource when it is used in different stages.
// If a resource is a program-wide resource and its usage stage does not matter, the same stage value can be used to build the key.
// Note: both stage and type must be less than 0xffff.
int buildStorageKey(EShLanguage stage, TStorageQualifier type) {
assert(static_cast<uint32_t>(stage) <= 0x0000ffff && static_cast<uint32_t>(type) <= 0x0000ffff);
return (stage << 16) | type;
}
protected:
// Marks the previous stage, used to gather more interface-symbol information.
EShLanguage preStage;
// Marks the shader stage the resolver is currently working on.
EShLanguage currentStage;
// Slot map for storage resources (locations of uniforms and interface symbols); slots are shared program-wide.
TSlotMap resourceSlotMap;
// Slot map for other resources (images, UBOs, SSBOs); slots are shared program-wide.
TSlotMap storageSlotMap;
};
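// A small standalone sketch of the storage-key packing buildStorageKey above
// performs: stage in the upper 16 bits, storage qualifier in the lower 16.
// Plain ints stand in for the EShLanguage and TStorageQualifier enums; the
// concrete values below are illustrative only.
#include <cassert>
#include <cstdio>

static int packStorageKey(int stage, int storage) {
    assert(static_cast<unsigned>(stage) <= 0xffffu && static_cast<unsigned>(storage) <= 0xffffu);
    return (stage << 16) | storage;
}

int main() {
    const int stage = 4;    // illustrative stage index
    const int storage = 2;  // illustrative storage-qualifier value
    const int key = packStorageKey(stage, storage);
    // The two halves can be recovered with a shift and a mask.
    std::printf("key=0x%08x stage=%d storage=%d\n", key, key >> 16, key & 0xffff);
}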
typedef std::map<TString, TVarEntryInfo> TVarLiveMap;
// Override of "operator=": when a vector of pair<const _Kty, _Ty> is sorted
// with VC++, the sort function calls:
// pair& operator=(const pair<_Other1, _Other2>& _Right)
// {
//     first = _Right.first;
//     second = _Right.second;
//     return (*this);
// }
// which leaves a const type on the left-hand side of the assignment.
// Overriding this operator avoids that compiler error.
// In the future, if the VC++ compiler can handle this situation,
// this part of the code can be removed.
struct TVarLivePair : std::pair<const TString, TVarEntryInfo> {
TVarLivePair(const std::pair<const TString, TVarEntryInfo>& _Right) : pair(_Right.first, _Right.second) {}
TVarLivePair& operator=(const TVarLivePair& _Right) {
const_cast<TString&>(first) = _Right.first;
second = _Right.second;
return (*this);
}
TVarLivePair(const TVarLivePair& src) : pair(src) { }
};
typedef std::vector<TVarLivePair> TVarLiveVector;
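// A standalone illustration of the TVarLivePair workaround described above:
// a pair with a const key is not assignable, so std::sort cannot shuffle it;
// wrapping the pair and casting the constness away in operator= (the same
// trick TVarLivePair uses) makes sorting compile. std::string/int stand in
// for TString/TVarEntryInfo; illustrative only.
#include <algorithm>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

struct LivePair : std::pair<const std::string, int> {
    LivePair(const std::pair<const std::string, int>& right) : pair(right.first, right.second) {}
    LivePair(const LivePair& src) : pair(src) {}
    LivePair& operator=(const LivePair& right) {
        const_cast<std::string&>(first) = right.first;  // defeat the const key, as TVarLivePair does
        second = right.second;
        return *this;
    }
};

int main() {
    std::vector<LivePair> v;
    v.push_back(LivePair(std::pair<const std::string, int>("b", 2)));
    v.push_back(LivePair(std::pair<const std::string, int>("a", 1)));
    std::sort(v.begin(), v.end(),
              [](const LivePair& l, const LivePair& r) { return l.first < r.first; });
    for (const LivePair& p : v)
        std::printf("%s=%d\n", p.first.c_str(), p.second);  // prints a=1 then b=2
}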
// I/O mapper
class TIoMapper {
public:
TIoMapper() {}
virtual ~TIoMapper() {}
// grow the reflection stage by stage
bool virtual addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*);
bool virtual doMap(TIoMapResolver*, TInfoSink&) { return true; }
};
// I/O mapper for GLSL
class TGlslIoMapper : public TIoMapper {
public:
TGlslIoMapper() {
memset(inVarMaps, 0, sizeof(TVarLiveMap*) * EShLangCount);
memset(outVarMaps, 0, sizeof(TVarLiveMap*) * EShLangCount);
memset(uniformVarMap, 0, sizeof(TVarLiveMap*) * EShLangCount);
memset(intermediates, 0, sizeof(TIntermediate*) * EShLangCount);
profile = ENoProfile;
version = 0;
autoPushConstantMaxSize = 128;
autoPushConstantBlockPacking = ElpStd430;
}
virtual ~TGlslIoMapper() {
for (size_t stage = 0; stage < EShLangCount; stage++) {
if (inVarMaps[stage] != nullptr) {
delete inVarMaps[stage];
inVarMaps[stage] = nullptr;
}
if (outVarMaps[stage] != nullptr) {
delete outVarMaps[stage];
outVarMaps[stage] = nullptr;
}
if (uniformVarMap[stage] != nullptr) {
delete uniformVarMap[stage];
uniformVarMap[stage] = nullptr;
}
if (intermediates[stage] != nullptr)
intermediates[stage] = nullptr;
}
}
// If set, the uniform block with the given name will be changed to be backed by
// push_constant if its size is <= maxSize
void setAutoPushConstantBlock(const char* name, unsigned int maxSize, TLayoutPacking packing) {
autoPushConstantBlockName = name;
autoPushConstantMaxSize = maxSize;
autoPushConstantBlockPacking = packing;
}
// grow the reflection stage by stage
bool addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*) override;
bool doMap(TIoMapResolver*, TInfoSink&) override;
TVarLiveMap *inVarMaps[EShLangCount], *outVarMaps[EShLangCount],
*uniformVarMap[EShLangCount];
TIntermediate* intermediates[EShLangCount];
bool hadError = false;
EProfile profile;
int version;
private:
TString autoPushConstantBlockName;
unsigned int autoPushConstantMaxSize;
TLayoutPacking autoPushConstantBlockPacking;
};
} // end namespace glslang
#endif // _IOMAPPER_INCLUDED
#endif // !GLSLANG_WEB && !GLSLANG_ANGLE

File diff suppressed because it is too large

View File

@ -0,0 +1,245 @@
//
// Copyright (C) 2015-2018 Google, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// This is implemented in Versions.cpp
#ifndef _PARSE_VERSIONS_INCLUDED_
#define _PARSE_VERSIONS_INCLUDED_
#include "../Public/ShaderLang.h"
#include "../Include/InfoSink.h"
#include "Scan.h"
#include <map>
namespace glslang {
//
// Base class for parse helpers.
// This just has version-related information and checking.
// This class should be sufficient for preprocessing.
//
class TParseVersions {
public:
TParseVersions(TIntermediate& interm, int version, EProfile profile,
const SpvVersion& spvVersion, EShLanguage language, TInfoSink& infoSink,
bool forwardCompatible, EShMessages messages)
:
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
forwardCompatible(forwardCompatible),
profile(profile),
#endif
infoSink(infoSink), version(version),
language(language),
spvVersion(spvVersion),
intermediate(interm), messages(messages), numErrors(0), currentScanner(0) { }
virtual ~TParseVersions() { }
void requireStage(const TSourceLoc&, EShLanguageMask, const char* featureDesc);
void requireStage(const TSourceLoc&, EShLanguage, const char* featureDesc);
#ifdef GLSLANG_WEB
const EProfile profile = EEsProfile;
bool isEsProfile() const { return true; }
void requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc)
{
if (! (EEsProfile & profileMask))
error(loc, "not supported with this profile:", featureDesc, ProfileName(profile));
}
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions,
const char* const extensions[], const char* featureDesc)
{
if ((EEsProfile & profileMask) && (minVersion == 0 || version < minVersion))
error(loc, "not supported for this version or the enabled extensions", featureDesc, "");
}
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension,
const char* featureDesc)
{
profileRequires(loc, profileMask, minVersion, extension ? 1 : 0, &extension, featureDesc);
}
void initializeExtensionBehavior() { }
void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc) { }
void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc) { }
void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc) { }
void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc) { }
TExtensionBehavior getExtensionBehavior(const char*) { return EBhMissing; }
bool extensionTurnedOn(const char* const extension) { return false; }
bool extensionsTurnedOn(int numExtensions, const char* const extensions[]) { return false; }
void updateExtensionBehavior(int line, const char* const extension, const char* behavior) { }
void updateExtensionBehavior(const char* const extension, TExtensionBehavior) { }
void checkExtensionStage(const TSourceLoc&, const char* const extension) { }
void extensionRequires(const TSourceLoc&, const char* const extension, const char* behavior) { }
void fullIntegerCheck(const TSourceLoc&, const char* op) { }
void doubleCheck(const TSourceLoc&, const char* op) { }
bool float16Arithmetic() { return false; }
void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
bool int16Arithmetic() { return false; }
void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
bool int8Arithmetic() { return false; }
void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
void int64Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
bool relaxedErrors() const { return false; }
bool suppressWarnings() const { return true; }
bool isForwardCompatible() const { return false; }
#else
#ifdef GLSLANG_ANGLE
const bool forwardCompatible = true;
const EProfile profile = ECoreProfile;
#else
bool forwardCompatible; // true if errors are to be given for use of deprecated features
EProfile profile; // the declared profile in the shader (core by default)
#endif
bool isEsProfile() const { return profile == EEsProfile; }
void requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc);
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions,
const char* const extensions[], const char* featureDesc);
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension,
const char* featureDesc);
virtual void initializeExtensionBehavior();
virtual void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc);
virtual void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc);
virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual TExtensionBehavior getExtensionBehavior(const char*);
virtual bool extensionTurnedOn(const char* const extension);
virtual bool extensionsTurnedOn(int numExtensions, const char* const extensions[]);
virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior);
virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior);
virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual void checkExtensionStage(const TSourceLoc&, const char* const extension);
virtual void extensionRequires(const TSourceLoc&, const char* const extension, const char* behavior);
virtual void fullIntegerCheck(const TSourceLoc&, const char* op);
virtual void unimplemented(const TSourceLoc&, const char* featureDesc);
virtual void doubleCheck(const TSourceLoc&, const char* op);
virtual void float16Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void float16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual bool float16Arithmetic();
virtual void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
virtual void int16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual bool int16Arithmetic();
virtual void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
virtual void int8ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual bool int8Arithmetic();
virtual void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
virtual void float16OpaqueCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt8Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt16Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt32Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void fcoopmatCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void intcoopmatCheck(const TSourceLoc&, const char *op, bool builtIn = false);
bool relaxedErrors() const { return (messages & EShMsgRelaxedErrors) != 0; }
bool suppressWarnings() const { return (messages & EShMsgSuppressWarnings) != 0; }
bool isForwardCompatible() const { return forwardCompatible; }
#endif // GLSLANG_WEB
virtual void spvRemoved(const TSourceLoc&, const char* op);
virtual void vulkanRemoved(const TSourceLoc&, const char* op);
virtual void requireVulkan(const TSourceLoc&, const char* op);
virtual void requireSpv(const TSourceLoc&, const char* op);
virtual void requireSpv(const TSourceLoc&, const char *op, unsigned int version);
#if defined(GLSLANG_WEB) && !defined(GLSLANG_WEB_DEVEL)
void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { addError(); }
void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { }
void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { addError(); }
void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { }
#else
virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
#endif
void addError() { ++numErrors; }
int getNumErrors() const { return numErrors; }
void setScanner(TInputScanner* scanner) { currentScanner = scanner; }
TInputScanner* getScanner() const { return currentScanner; }
const TSourceLoc& getCurrentLoc() const { return currentScanner->getSourceLoc(); }
void setCurrentLine(int line) { currentScanner->setLine(line); }
void setCurrentColumn(int col) { currentScanner->setColumn(col); }
void setCurrentSourceName(const char* name) { currentScanner->setFile(name); }
void setCurrentString(int string) { currentScanner->setString(string); }
void getPreamble(std::string&);
#ifdef ENABLE_HLSL
bool isReadingHLSL() const { return (messages & EShMsgReadHlsl) == EShMsgReadHlsl; }
bool hlslEnable16BitTypes() const { return (messages & EShMsgHlslEnable16BitTypes) != 0; }
bool hlslDX9Compatible() const { return (messages & EShMsgHlslDX9Compatible) != 0; }
#else
bool isReadingHLSL() const { return false; }
#endif
TInfoSink& infoSink;
// compilation mode
int version; // version, updated by #version in the shader
EShLanguage language; // really the stage
SpvVersion spvVersion;
TIntermediate& intermediate; // helper for making and hooking up pieces of the parse tree
protected:
TMap<TString, TExtensionBehavior> extensionBehavior; // for each extension string, what its current behavior is
TMap<TString, unsigned int> extensionMinSpv; // for each extension string, store minimum spirv required
EShMessages messages; // errors/warnings/rule-sets
int numErrors; // number of compile-time errors encountered
TInputScanner* currentScanner;
private:
explicit TParseVersions(const TParseVersions&);
TParseVersions& operator=(const TParseVersions&);
};
} // end namespace glslang
#endif // _PARSE_VERSIONS_INCLUDED_
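// A standalone sketch of the profile/version gating pattern visible in the
// GLSLANG_WEB stubs above (requireProfile / profileRequires). The full
// implementations in Versions.cpp also consult the extension table, which is
// omitted here; the enum values and helper names below are stand-ins, not the
// glslang API.
#include <cstdio>

enum ToyProfile { ToyCore = 1 << 0, ToyCompatibility = 1 << 1, ToyEs = 1 << 2 };

struct ToyVersionState {
    ToyProfile profile;  // the declared profile in the shader
    int version;         // value of the shader's #version directive
};

// Mirrors requireProfile: the feature exists only in the profiles named by profileMask.
static bool profileAllowed(const ToyVersionState& s, int profileMask) {
    return (s.profile & profileMask) != 0;
}

// Mirrors the version part of profileRequires: when the restriction applies to the
// current profile, the feature needs version >= minVersion; minVersion == 0 means
// "never available in this profile without an extension".
static bool versionAllowed(const ToyVersionState& s, int profileMask, int minVersion) {
    if ((s.profile & profileMask) == 0)
        return true;  // restriction does not apply to this profile
    return minVersion != 0 && s.version >= minVersion;
}

int main() {
    ToyVersionState es300 = { ToyEs, 300 };
    std::printf("core-only feature in ES 300: %s\n",
                profileAllowed(es300, ToyCore) ? "ok" : "rejected");
    std::printf("ES feature requiring #version 310: %s\n",
                versionAllowed(es300, ToyEs, 310) ? "ok" : "rejected");
}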

View File

@ -0,0 +1,703 @@
//
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
/****************************************************************************\
Copyright (c) 2002, NVIDIA Corporation.
NVIDIA Corporation("NVIDIA") supplies this software to you in
consideration of your agreement to the following terms, and your use,
installation, modification or redistribution of this NVIDIA software
constitutes acceptance of these terms. If you do not agree with these
terms, please do not use, install, modify or redistribute this NVIDIA
software.
In consideration of your agreement to abide by the following terms, and
subject to these terms, NVIDIA grants you a personal, non-exclusive
license, under NVIDIA's copyrights in this original NVIDIA software (the
"NVIDIA Software"), to use, reproduce, modify and redistribute the
NVIDIA Software, with or without modifications, in source and/or binary
forms; provided that if you redistribute the NVIDIA Software, you must
retain the copyright notice of NVIDIA, this notice and the following
text and disclaimers in all such redistributions of the NVIDIA Software.
Neither the name, trademarks, service marks nor logos of NVIDIA
Corporation may be used to endorse or promote products derived from the
NVIDIA Software without specific prior written permission from NVIDIA.
Except as expressly stated in this notice, no other rights or licenses
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
PRODUCTS.
IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/
#ifndef PPCONTEXT_H
#define PPCONTEXT_H
#include <stack>
#include <unordered_map>
#include <sstream>
#include "../ParseHelper.h"
#include "PpTokens.h"
/* windows only pragma */
#ifdef _MSC_VER
#pragma warning(disable : 4127)
#endif
namespace glslang {
class TPpToken {
public:
TPpToken() { clear(); }
void clear()
{
space = false;
i64val = 0;
loc.init();
name[0] = 0;
}
// Used for comparing macro definitions, so checks what is relevant for that.
bool operator==(const TPpToken& right) const
{
return space == right.space &&
ival == right.ival && dval == right.dval && i64val == right.i64val &&
strncmp(name, right.name, MaxTokenLength) == 0;
}
bool operator!=(const TPpToken& right) const { return ! operator==(right); }
TSourceLoc loc;
// True if a space (for white space or a removed comment) should also be
// recognized, in front of the token returned:
bool space;
// Numeric value of the token:
union {
int ival;
double dval;
long long i64val;
};
// Text string of the token:
char name[MaxTokenLength + 1];
};
class TStringAtomMap {
//
// Implementation is in PpAtom.cpp
//
// Maintain a bi-directional mapping between relevant preprocessor strings and
// "atoms", which are unique integers (small, contiguous, not hash-like), one per string.
//
public:
TStringAtomMap();
// Map string -> atom.
// Return 0 if no existing string.
int getAtom(const char* s) const
{
auto it = atomMap.find(s);
return it == atomMap.end() ? 0 : it->second;
}
// Map a new or existing string -> atom, inventing a new atom if necessary.
int getAddAtom(const char* s)
{
int atom = getAtom(s);
if (atom == 0) {
atom = nextAtom++;
addAtomFixed(s, atom);
}
return atom;
}
// Map atom -> string.
const char* getString(int atom) const { return stringMap[atom]->c_str(); }
protected:
TStringAtomMap(TStringAtomMap&);
TStringAtomMap& operator=(TStringAtomMap&);
TUnorderedMap<TString, int> atomMap;
TVector<const TString*> stringMap; // these point into the TString in atomMap
int nextAtom;
// Bad source characters can lead to bad atoms, so gracefully handle those by
// pre-filling the table with them (to avoid if tests later).
TString badToken;
// Add bi-directional mappings:
// - string -> atom
// - atom -> string
void addAtomFixed(const char* s, int atom)
{
auto it = atomMap.insert(std::pair<TString, int>(s, atom)).first;
if (stringMap.size() < (size_t)atom + 1)
stringMap.resize(atom + 100, &badToken);
stringMap[atom] = &it->first;
}
};
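// A minimal standalone sketch of the same interning idea with standard
// containers: an unordered_map for string -> atom and a vector of pointers
// into the map's keys for atom -> string (node-based map keys have stable
// addresses, so the pointers stay valid). Illustrative only, not the glslang
// implementation.
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

class AtomMap {
public:
    AtomMap() : nextAtom(1) { stringMap.push_back(&badToken); }  // atom 0 means "not found"

    int getAtom(const std::string& s) const {
        auto it = atomMap.find(s);
        return it == atomMap.end() ? 0 : it->second;
    }

    int getAddAtom(const std::string& s) {
        int atom = getAtom(s);
        if (atom == 0) {
            atom = nextAtom++;
            auto it = atomMap.emplace(s, atom).first;
            stringMap.push_back(&it->first);  // atoms are issued sequentially, so index == atom
        }
        return atom;
    }

    const char* getString(int atom) const { return stringMap[atom]->c_str(); }

private:
    std::unordered_map<std::string, int> atomMap;  // string -> atom
    std::vector<const std::string*> stringMap;     // atom -> string (points into atomMap keys)
    std::string badToken = "<bad token>";
    int nextAtom;
};

int main() {
    AtomMap atoms;
    const int a = atoms.getAddAtom("define");
    const int b = atoms.getAddAtom("include");
    std::printf("%d -> %s, %d -> %s, lookup(define) = %d\n",
                a, atoms.getString(a), b, atoms.getString(b), atoms.getAtom("define"));
}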
class TInputScanner;
enum MacroExpandResult {
MacroExpandNotStarted, // macro not expanded, which might not be an error
MacroExpandError, // a clear error occurred while expanding, no expansion
MacroExpandStarted, // macro expansion process has started
MacroExpandUndef // macro is undefined and will be expanded
};
// This class is the result of turning a huge pile of C code communicating through globals
// into a class. This was done to allow instancing, in order to attain thread safety.
// Don't expect too much in terms of OO design.
class TPpContext {
public:
TPpContext(TParseContextBase&, const std::string& rootFileName, TShader::Includer&);
virtual ~TPpContext();
void setPreamble(const char* preamble, size_t length);
int tokenize(TPpToken& ppToken);
int tokenPaste(int token, TPpToken&);
class tInput {
public:
tInput(TPpContext* p) : done(false), pp(p) { }
virtual ~tInput() { }
virtual int scan(TPpToken*) = 0;
virtual int getch() = 0;
virtual void ungetch() = 0;
virtual bool peekPasting() { return false; } // true when about to see ##
virtual bool peekContinuedPasting(int) { return false; } // true when non-spaced tokens can paste
virtual bool endOfReplacementList() { return false; } // true when at the end of a macro replacement list (RHS of #define)
virtual bool isMacroInput() { return false; }
// Will be called when we start reading tokens from this instance
virtual void notifyActivated() {}
// Will be called when we do not read tokens from this instance anymore
virtual void notifyDeleted() {}
protected:
bool done;
TPpContext* pp;
};
void setInput(TInputScanner& input, bool versionWillBeError);
void pushInput(tInput* in)
{
inputStack.push_back(in);
in->notifyActivated();
}
void popInput()
{
inputStack.back()->notifyDeleted();
delete inputStack.back();
inputStack.pop_back();
}
//
// From PpTokens.cpp
//
// Capture the needed parts of a token stream for macro recording/playback.
class TokenStream {
public:
// Manage a stream of these 'Token', which capture the relevant parts
// of a TPpToken, plus its atom.
class Token {
public:
Token(int atom, const TPpToken& ppToken) :
atom(atom),
space(ppToken.space),
i64val(ppToken.i64val),
name(ppToken.name) { }
int get(TPpToken& ppToken)
{
ppToken.clear();
ppToken.space = space;
ppToken.i64val = i64val;
snprintf(ppToken.name, sizeof(ppToken.name), "%s", name.c_str());
return atom;
}
bool isAtom(int a) const { return atom == a; }
int getAtom() const { return atom; }
bool nonSpaced() const { return !space; }
protected:
Token() {}
int atom;
bool space; // did a space precede the token?
long long i64val;
TString name;
};
TokenStream() : currentPos(0) { }
void putToken(int token, TPpToken* ppToken);
bool peekToken(int atom) { return !atEnd() && stream[currentPos].isAtom(atom); }
bool peekContinuedPasting(int atom)
{
// This is basically necessary because, for example, the PP
// tokenizer only accepts valid numeric literals plus suffixes, so it
// splits a numeric literal with a bad suffix into two tokens, both of
// which should be pasted together into one token when token pasting.
//
// The following code is a bit more generalized than the above example.
if (!atEnd() && atom == PpAtomIdentifier && stream[currentPos].nonSpaced()) {
switch(stream[currentPos].getAtom()) {
case PpAtomConstInt:
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
case PpAtomConstInt16:
case PpAtomConstUint16:
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
case PpAtomConstString:
case PpAtomIdentifier:
return true;
default:
break;
}
}
return false;
}
int getToken(TParseContextBase&, TPpToken*);
bool atEnd() { return currentPos >= stream.size(); }
bool peekTokenizedPasting(bool lastTokenPastes);
bool peekUntokenizedPasting();
void reset() { currentPos = 0; }
protected:
TVector<Token> stream;
size_t currentPos;
};
//
// From Pp.cpp
//
struct MacroSymbol {
MacroSymbol() : functionLike(0), busy(0), undef(0) { }
TVector<int> args;
TokenStream body;
unsigned functionLike : 1; // 0 means object-like, 1 means function-like
unsigned busy : 1;
unsigned undef : 1;
};
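// For example, "#define PLUS1(x) ((x) + 1)" would conceptually be stored as a
// MacroSymbol with functionLike == 1, args holding the atom for "x", and body
// holding the replacement-list tokens "(" "(" "x" ")" "+" "1" ")".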
typedef TMap<int, MacroSymbol> TSymbolMap;
TSymbolMap macroDefs; // map atoms to macro definitions
MacroSymbol* lookupMacroDef(int atom)
{
auto existingMacroIt = macroDefs.find(atom);
return (existingMacroIt == macroDefs.end()) ? nullptr : &(existingMacroIt->second);
}
void addMacroDef(int atom, MacroSymbol& macroDef) { macroDefs[atom] = macroDef; }
protected:
TPpContext(TPpContext&);
TPpContext& operator=(TPpContext&);
TStringAtomMap atomStrings;
char* preamble; // string to parse, everything before line 1 of string 0; null if there is no preamble
int preambleLength;
char** strings; // official strings of the shader, starting at string 0, line 1
size_t* lengths;
int numStrings; // how many official strings there are
int currentString; // which string we're currently parsing (-1 for preamble)
// Scanner data:
int previous_token;
TParseContextBase& parseContext;
// Get the next token from *stack* of input sources, popping input sources
// that are out of tokens, down until an input source is found that has a token.
// Return EndOfInput when there are no more tokens to be found by doing this.
int scanToken(TPpToken* ppToken)
{
int token = EndOfInput;
while (! inputStack.empty()) {
token = inputStack.back()->scan(ppToken);
if (token != EndOfInput || inputStack.empty())
break;
popInput();
}
return token;
}
int getChar() { return inputStack.back()->getch(); }
void ungetChar() { inputStack.back()->ungetch(); }
bool peekPasting() { return !inputStack.empty() && inputStack.back()->peekPasting(); }
bool peekContinuedPasting(int a)
{
return !inputStack.empty() && inputStack.back()->peekContinuedPasting(a);
}
bool endOfReplacementList() { return inputStack.empty() || inputStack.back()->endOfReplacementList(); }
bool isMacroInput() { return inputStack.size() > 0 && inputStack.back()->isMacroInput(); }
static const int maxIfNesting = 65;
int ifdepth; // current #if-#else-#endif nesting in the cpp.c file (pre-processor)
bool elseSeen[maxIfNesting]; // keeps track of whether an else has been seen at a particular depth
int elsetracker; // counter for #if-#else-#endif constructs
class tMacroInput : public tInput {
public:
tMacroInput(TPpContext* pp) : tInput(pp), prepaste(false), postpaste(false) { }
virtual ~tMacroInput()
{
for (size_t i = 0; i < args.size(); ++i)
delete args[i];
for (size_t i = 0; i < expandedArgs.size(); ++i)
delete expandedArgs[i];
}
virtual int scan(TPpToken*) override;
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
bool peekPasting() override { return prepaste; }
bool peekContinuedPasting(int a) override { return mac->body.peekContinuedPasting(a); }
bool endOfReplacementList() override { return mac->body.atEnd(); }
bool isMacroInput() override { return true; }
MacroSymbol *mac;
TVector<TokenStream*> args;
TVector<TokenStream*> expandedArgs;
protected:
bool prepaste; // true if we are just before ##
bool postpaste; // true if we are right after ##
};
class tMarkerInput : public tInput {
public:
tMarkerInput(TPpContext* pp) : tInput(pp) { }
virtual int scan(TPpToken*) override
{
if (done)
return EndOfInput;
done = true;
return marker;
}
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
static const int marker = -3;
};
class tZeroInput : public tInput {
public:
tZeroInput(TPpContext* pp) : tInput(pp) { }
virtual int scan(TPpToken*) override;
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
};
std::vector<tInput*> inputStack;
bool errorOnVersion;
bool versionSeen;
//
// from Pp.cpp
//
// Used to obtain #include content.
TShader::Includer& includer;
int CPPdefine(TPpToken * ppToken);
int CPPundef(TPpToken * ppToken);
int CPPelse(int matchelse, TPpToken * ppToken);
int extraTokenCheck(int atom, TPpToken* ppToken, int token);
int eval(int token, int precedence, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
int evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
int CPPif (TPpToken * ppToken);
int CPPifdef(int defined, TPpToken * ppToken);
int CPPinclude(TPpToken * ppToken);
int CPPline(TPpToken * ppToken);
int CPPerror(TPpToken * ppToken);
int CPPpragma(TPpToken * ppToken);
int CPPversion(TPpToken * ppToken);
int CPPextension(TPpToken * ppToken);
int readCPPline(TPpToken * ppToken);
int scanHeaderName(TPpToken* ppToken, char delimit);
TokenStream* PrescanMacroArg(TokenStream&, TPpToken*, bool newLineOkay);
MacroExpandResult MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay);
//
// From PpTokens.cpp
//
void pushTokenStreamInput(TokenStream&, bool pasting = false);
void UngetToken(int token, TPpToken*);
class tTokenInput : public tInput {
public:
tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) :
tInput(pp),
tokens(t),
lastTokenPastes(prepasting) { }
virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->parseContext, ppToken); }
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); }
bool peekContinuedPasting(int a) override { return tokens->peekContinuedPasting(a); }
protected:
TokenStream* tokens;
bool lastTokenPastes; // true if the last token in the input is to be pasted, rather than consumed as a token
};
class tUngotTokenInput : public tInput {
public:
tUngotTokenInput(TPpContext* pp, int t, TPpToken* p) : tInput(pp), token(t), lval(*p) { }
virtual int scan(TPpToken *) override;
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
protected:
int token;
TPpToken lval;
};
//
// From PpScanner.cpp
//
class tStringInput : public tInput {
public:
tStringInput(TPpContext* pp, TInputScanner& i) : tInput(pp), input(&i) { }
virtual int scan(TPpToken*) override;
// Scanner used to get source stream characters.
// - Escaped newlines are handled here, invisibly to the caller.
// - All forms of newline are handled, and turned into just a '\n'.
int getch() override
{
int ch = input->get();
if (ch == '\\') {
// Move past escaped newlines, as many as sequentially exist
do {
if (input->peek() == '\r' || input->peek() == '\n') {
bool allowed = pp->parseContext.lineContinuationCheck(input->getSourceLoc(), pp->inComment);
if (! allowed && pp->inComment)
return '\\';
// escape one newline now
ch = input->get();
int nextch = input->get();
if (ch == '\r' && nextch == '\n')
ch = input->get();
else
ch = nextch;
} else
return '\\';
} while (ch == '\\');
}
// handle any non-escaped newline
if (ch == '\r' || ch == '\n') {
if (ch == '\r' && input->peek() == '\n')
input->get();
return '\n';
}
return ch;
}
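// Worked example for getch() above (a sketch, not part of the original
// header), assuming line continuations are allowed in the current context:
// for the raw input
//
//     "vec2 p\\\r\n  = q;\r\n"
//
// successive getch() calls return the characters of "vec2 p  = q;" followed
// by a single '\n' -- the escaped CRLF disappears and the trailing bare CRLF
// collapses to '\n'.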
// Scanner used to back up the source stream characters. Newlines are
// handled here, invisibly to the caller, meaning this has to undo exactly
// what getch() above does (e.g., don't leave things in the middle of a
// sequence of escaped newlines).
void ungetch() override
{
input->unget();
do {
int ch = input->peek();
if (ch == '\r' || ch == '\n') {
if (ch == '\n') {
// correct for two-character newline
input->unget();
if (input->peek() != '\r')
input->get();
}
// now in front of a complete newline, move past an escape character
input->unget();
if (input->peek() == '\\')
input->unget();
else {
input->get();
break;
}
} else
break;
} while (true);
}
protected:
TInputScanner* input;
};
// Holds a reference to included file data, as well as a
// prologue and an epilogue string. This can be scanned using the tInput
// interface and acts as a single source string.
class TokenizableIncludeFile : public tInput {
public:
// Copies prologue and epilogue. The includedFile must remain valid
// until this TokenizableIncludeFile is no longer used.
TokenizableIncludeFile(const TSourceLoc& startLoc,
const std::string& prologue,
TShader::Includer::IncludeResult* includedFile,
const std::string& epilogue,
TPpContext* pp)
: tInput(pp),
prologue_(prologue),
epilogue_(epilogue),
includedFile_(includedFile),
scanner(3, strings, lengths, nullptr, 0, 0, true),
prevScanner(nullptr),
stringInput(pp, scanner)
{
strings[0] = prologue_.data();
strings[1] = includedFile_->headerData;
strings[2] = epilogue_.data();
lengths[0] = prologue_.size();
lengths[1] = includedFile_->headerLength;
lengths[2] = epilogue_.size();
scanner.setLine(startLoc.line);
scanner.setString(startLoc.string);
scanner.setFile(startLoc.getFilenameStr(), 0);
scanner.setFile(startLoc.getFilenameStr(), 1);
scanner.setFile(startLoc.getFilenameStr(), 2);
}
// tInput methods:
int scan(TPpToken* t) override { return stringInput.scan(t); }
int getch() override { return stringInput.getch(); }
void ungetch() override { stringInput.ungetch(); }
void notifyActivated() override
{
prevScanner = pp->parseContext.getScanner();
pp->parseContext.setScanner(&scanner);
pp->push_include(includedFile_);
}
void notifyDeleted() override
{
pp->parseContext.setScanner(prevScanner);
pp->pop_include();
}
private:
TokenizableIncludeFile& operator=(const TokenizableIncludeFile&);
// Stores the prologue for this string.
const std::string prologue_;
// Stores the epilogue for this string.
const std::string epilogue_;
// Points to the IncludeResult that this TokenizableIncludeFile represents.
TShader::Includer::IncludeResult* includedFile_;
// Points to prologue_, includedFile_->headerData and epilogue_;
// this is passed to the scanner constructor.
// These pointers do not own the storage, which must remain valid
// until this object has been destroyed.
const char* strings[3];
// Lengths of the strings above, passed to the scanner constructor.
size_t lengths[3];
// Scans over str_.
TInputScanner scanner;
// The previous effective scanner before the scanner in this instance
// has been activated.
TInputScanner* prevScanner;
// Delegate object implementing the tInput interface.
tStringInput stringInput;
};
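// Hypothetical construction sketch (an assumption based on the comments
// above, not a quote of the preprocessor code): the prologue/epilogue can
// carry #line directives so diagnostics inside and after the include point
// at the right file and line, e.g.
//
//     std::string prologue = "#line 1 \"header.glsl\"\n";
//     std::string epilogue = "\n#line 42 \"caller.glsl\"\n";
//     // new TokenizableIncludeFile(includeLoc, prologue, result, epilogue, this)
//     // ...then pushed onto inputStack, so notifyActivated() swaps the scanner in.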
int ScanFromString(char* s);
void missingEndifCheck();
int lFloatConst(int len, int ch, TPpToken* ppToken);
int characterLiteral(TPpToken* ppToken);
void push_include(TShader::Includer::IncludeResult* result)
{
currentSourceFile = result->headerName;
includeStack.push(result);
}
void pop_include()
{
TShader::Includer::IncludeResult* include = includeStack.top();
includeStack.pop();
includer.releaseInclude(include);
if (includeStack.empty()) {
currentSourceFile = rootFileName;
} else {
currentSourceFile = includeStack.top()->headerName;
}
}
bool inComment;
std::string rootFileName;
std::stack<TShader::Includer::IncludeResult*> includeStack;
std::string currentSourceFile;
std::istringstream strtodStream;
bool disableEscapeSequences;
};
} // end namespace glslang
#endif // PPCONTEXT_H

View File

@ -0,0 +1,179 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
/****************************************************************************\
Copyright (c) 2002, NVIDIA Corporation.
NVIDIA Corporation("NVIDIA") supplies this software to you in
consideration of your agreement to the following terms, and your use,
installation, modification or redistribution of this NVIDIA software
constitutes acceptance of these terms. If you do not agree with these
terms, please do not use, install, modify or redistribute this NVIDIA
software.
In consideration of your agreement to abide by the following terms, and
subject to these terms, NVIDIA grants you a personal, non-exclusive
license, under NVIDIA's copyrights in this original NVIDIA software (the
"NVIDIA Software"), to use, reproduce, modify and redistribute the
NVIDIA Software, with or without modifications, in source and/or binary
forms; provided that if you redistribute the NVIDIA Software, you must
retain the copyright notice of NVIDIA, this notice and the following
text and disclaimers in all such redistributions of the NVIDIA Software.
Neither the name, trademarks, service marks nor logos of NVIDIA
Corporation may be used to endorse or promote products derived from the
NVIDIA Software without specific prior written permission from NVIDIA.
Except as expressly stated in this notice, no other rights or licenses
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
PRODUCTS.
IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/
#ifndef PARSER_H
#define PARSER_H
namespace glslang {
// Multi-character tokens
enum EFixedAtoms {
// single character tokens get their own char value as their token; start here for multi-character tokens
PpAtomMaxSingle = 127,
// replace bad character tokens with this, to avoid accidental aliasing with the below
PpAtomBadToken,
// Operators
PPAtomAddAssign,
PPAtomSubAssign,
PPAtomMulAssign,
PPAtomDivAssign,
PPAtomModAssign,
PpAtomRight,
PpAtomLeft,
PpAtomRightAssign,
PpAtomLeftAssign,
PpAtomAndAssign,
PpAtomOrAssign,
PpAtomXorAssign,
PpAtomAnd,
PpAtomOr,
PpAtomXor,
PpAtomEQ,
PpAtomNE,
PpAtomGE,
PpAtomLE,
PpAtomDecrement,
PpAtomIncrement,
PpAtomColonColon,
PpAtomPaste,
// Constants
PpAtomConstInt,
PpAtomConstUint,
PpAtomConstInt64,
PpAtomConstUint64,
PpAtomConstInt16,
PpAtomConstUint16,
PpAtomConstFloat,
PpAtomConstDouble,
PpAtomConstFloat16,
PpAtomConstString,
// Identifiers
PpAtomIdentifier,
// preprocessor "keywords"
PpAtomDefine,
PpAtomUndef,
PpAtomIf,
PpAtomIfdef,
PpAtomIfndef,
PpAtomElse,
PpAtomElif,
PpAtomEndif,
PpAtomLine,
PpAtomPragma,
PpAtomError,
// #version ...
PpAtomVersion,
PpAtomCore,
PpAtomCompatibility,
PpAtomEs,
// #extension
PpAtomExtension,
// __LINE__, __FILE__, __VERSION__
PpAtomLineMacro,
PpAtomFileMacro,
PpAtomVersionMacro,
// #include
PpAtomInclude,
PpAtomLast,
};
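// Illustrative note (an addition, not part of the original header):
// single-character tokens reuse their character code as the atom, while
// multi-character tokens start just above PpAtomMaxSingle, e.g.
//
//     int plus   = '+';             // 43: a single-character token is its own atom
//     int plusEq = PPAtomAddAssign; // 129: first operator after PpAtomBadToken (128)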
} // end namespace glslang
#endif /* not PARSER_H */

View File

@ -0,0 +1,55 @@
//
// Copyright (C) 2015-2016 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Visit the nodes in the glslang intermediate tree representation to
// propagate 'noContraction' qualifier.
//
#pragma once
#include "../Include/intermediate.h"
namespace glslang {
// Propagates the 'precise' qualifier (represented as the 'noContraction'
// qualifier on objects) from the 'precise' variables specified in the shader
// source to all the involved objects, and adds the 'noContraction' qualifier
// to the involved arithmetic operations.
// Note that the same qualifier, 'noContraction', is used in both object nodes
// and arithmetic operation nodes, but with different meanings. For object
// nodes, 'noContraction' means the object is 'precise'; for arithmetic
// operation nodes, it means the operation should not be contracted.
void PropagateNoContraction(const glslang::TIntermediate& intermediate);
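// Minimal usage sketch (an assumption, not part of the original header):
// run the pass over a parsed shader's intermediate representation.
//
//     glslang::TShader shader(EShLangFragment);
//     // ... setStrings(), parse() ...
//     glslang::PropagateNoContraction(*shader.getIntermediate());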
};

View File

@ -0,0 +1,223 @@
//
// Copyright (C) 2013-2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
#ifndef _REFLECTION_INCLUDED
#define _REFLECTION_INCLUDED
#include "../Public/ShaderLang.h"
#include "../Include/Types.h"
#include <list>
#include <set>
//
// A reflection database and its interface, consistent with the OpenGL API reflection queries.
//
namespace glslang {
class TIntermediate;
class TIntermAggregate;
class TReflectionTraverser;
// The full reflection database
class TReflection {
public:
TReflection(EShReflectionOptions opts, EShLanguage first, EShLanguage last)
: options(opts), firstStage(first), lastStage(last), badReflection(TObjectReflection::badReflection())
{
for (int dim=0; dim<3; ++dim)
localSize[dim] = 0;
}
virtual ~TReflection() {}
// grow the reflection stage by stage
bool addStage(EShLanguage, const TIntermediate&);
// for mapping a uniform index to a uniform object's description
int getNumUniforms() { return (int)indexToUniform.size(); }
const TObjectReflection& getUniform(int i) const
{
if (i >= 0 && i < (int)indexToUniform.size())
return indexToUniform[i];
else
return badReflection;
}
// for mapping a block index to the block's description
int getNumUniformBlocks() const { return (int)indexToUniformBlock.size(); }
const TObjectReflection& getUniformBlock(int i) const
{
if (i >= 0 && i < (int)indexToUniformBlock.size())
return indexToUniformBlock[i];
else
return badReflection;
}
// for mapping a pipeline input index to the input's description
int getNumPipeInputs() { return (int)indexToPipeInput.size(); }
const TObjectReflection& getPipeInput(int i) const
{
if (i >= 0 && i < (int)indexToPipeInput.size())
return indexToPipeInput[i];
else
return badReflection;
}
// for mapping a pipeline output index to the output's description
int getNumPipeOutputs() { return (int)indexToPipeOutput.size(); }
const TObjectReflection& getPipeOutput(int i) const
{
if (i >= 0 && i < (int)indexToPipeOutput.size())
return indexToPipeOutput[i];
else
return badReflection;
}
// for mapping from an atomic counter to the uniform index
int getNumAtomicCounters() const { return (int)atomicCounterUniformIndices.size(); }
const TObjectReflection& getAtomicCounter(int i) const
{
if (i >= 0 && i < (int)atomicCounterUniformIndices.size())
return getUniform(atomicCounterUniformIndices[i]);
else
return badReflection;
}
// for mapping a buffer variable index to a buffer variable object's description
int getNumBufferVariables() { return (int)indexToBufferVariable.size(); }
const TObjectReflection& getBufferVariable(int i) const
{
if (i >= 0 && i < (int)indexToBufferVariable.size())
return indexToBufferVariable[i];
else
return badReflection;
}
// for mapping a storage block index to the storage block's description
int getNumStorageBuffers() const { return (int)indexToBufferBlock.size(); }
const TObjectReflection& getStorageBufferBlock(int i) const
{
if (i >= 0 && i < (int)indexToBufferBlock.size())
return indexToBufferBlock[i];
else
return badReflection;
}
// for mapping any name to its index (block names, uniform names and input/output names)
int getIndex(const char* name) const
{
TNameToIndex::const_iterator it = nameToIndex.find(name);
if (it == nameToIndex.end())
return -1;
else
return it->second;
}
// see getIndex(const char*)
int getIndex(const TString& name) const { return getIndex(name.c_str()); }
// for mapping any name to its index (only pipe input/output names)
int getPipeIOIndex(const char* name, const bool inOrOut) const
{
TNameToIndex::const_iterator it = inOrOut ? pipeInNameToIndex.find(name) : pipeOutNameToIndex.find(name);
if (it == (inOrOut ? pipeInNameToIndex.end() : pipeOutNameToIndex.end()))
return -1;
else
return it->second;
}
// see getPipeIOIndex(const char*, const bool)
int getPipeIOIndex(const TString& name, const bool inOrOut) const { return getPipeIOIndex(name.c_str(), inOrOut); }
// Thread local size
unsigned getLocalSize(int dim) const { return dim <= 2 ? localSize[dim] : 0; }
void dump();
protected:
friend class glslang::TReflectionTraverser;
void buildCounterIndices(const TIntermediate&);
void buildUniformStageMask(const TIntermediate& intermediate);
void buildAttributeReflection(EShLanguage, const TIntermediate&);
// Need a TString hash: typedef std::unordered_map<TString, int> TNameToIndex;
typedef std::map<std::string, int> TNameToIndex;
typedef std::vector<TObjectReflection> TMapIndexToReflection;
typedef std::vector<int> TIndices;
TMapIndexToReflection& GetBlockMapForStorage(TStorageQualifier storage)
{
if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
return indexToBufferBlock;
return indexToUniformBlock;
}
TMapIndexToReflection& GetVariableMapForStorage(TStorageQualifier storage)
{
if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
return indexToBufferVariable;
return indexToUniform;
}
EShReflectionOptions options;
EShLanguage firstStage;
EShLanguage lastStage;
TObjectReflection badReflection; // returned for queries of -1 or generally out-of-range indices; has the expected descriptions in it for this
TNameToIndex nameToIndex; // maps names to indexes; can hold all types of data: uniform/buffer and which function names have been processed
TNameToIndex pipeInNameToIndex; // maps pipe in names to indexes; this is a fix to separate pipe I/O from uniforms and buffers.
TNameToIndex pipeOutNameToIndex; // maps pipe out names to indexes; this is a fix to separate pipe I/O from uniforms and buffers.
TMapIndexToReflection indexToUniform;
TMapIndexToReflection indexToUniformBlock;
TMapIndexToReflection indexToBufferVariable;
TMapIndexToReflection indexToBufferBlock;
TMapIndexToReflection indexToPipeInput;
TMapIndexToReflection indexToPipeOutput;
TIndices atomicCounterUniformIndices;
unsigned int localSize[3];
};
} // end namespace glslang
#endif // _REFLECTION_INCLUDED
#endif // !GLSLANG_WEB && !GLSLANG_ANGLE

View File

@ -0,0 +1,982 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013-2016 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _COMPILER_INTERFACE_INCLUDED_
#define _COMPILER_INTERFACE_INCLUDED_
#include "../Include/ResourceLimits.h"
#include "../MachineIndependent/Versions.h"
#include <cstring>
#include <vector>
#ifdef _WIN32
#define C_DECL __cdecl
#else
#define C_DECL
#endif
#ifdef GLSLANG_IS_SHARED_LIBRARY
#ifdef _WIN32
#ifdef GLSLANG_EXPORTING
#define GLSLANG_EXPORT __declspec(dllexport)
#else
#define GLSLANG_EXPORT __declspec(dllimport)
#endif
#elif __GNUC__ >= 4
#define GLSLANG_EXPORT __attribute__((visibility("default")))
#endif
#endif // GLSLANG_IS_SHARED_LIBRARY
#ifndef GLSLANG_EXPORT
#define GLSLANG_EXPORT
#endif
//
// This is the platform independent interface between an OGL driver
// and the shading language compiler/linker.
//
#ifdef __cplusplus
extern "C" {
#endif
//
// Call before doing any other compiler/linker operations.
//
// (Call once per process, not once per thread.)
//
GLSLANG_EXPORT int ShInitialize();
//
// Call this at process shutdown to clean up memory.
//
GLSLANG_EXPORT int ShFinalize();
//
// Types of languages the compiler can consume.
//
typedef enum {
EShLangVertex,
EShLangTessControl,
EShLangTessEvaluation,
EShLangGeometry,
EShLangFragment,
EShLangCompute,
EShLangRayGen,
EShLangRayGenNV = EShLangRayGen,
EShLangIntersect,
EShLangIntersectNV = EShLangIntersect,
EShLangAnyHit,
EShLangAnyHitNV = EShLangAnyHit,
EShLangClosestHit,
EShLangClosestHitNV = EShLangClosestHit,
EShLangMiss,
EShLangMissNV = EShLangMiss,
EShLangCallable,
EShLangCallableNV = EShLangCallable,
EShLangTaskNV,
EShLangMeshNV,
LAST_ELEMENT_MARKER(EShLangCount),
} EShLanguage; // would be better as stage, but this is ancient now
typedef enum : unsigned {
EShLangVertexMask = (1 << EShLangVertex),
EShLangTessControlMask = (1 << EShLangTessControl),
EShLangTessEvaluationMask = (1 << EShLangTessEvaluation),
EShLangGeometryMask = (1 << EShLangGeometry),
EShLangFragmentMask = (1 << EShLangFragment),
EShLangComputeMask = (1 << EShLangCompute),
EShLangRayGenMask = (1 << EShLangRayGen),
EShLangRayGenNVMask = EShLangRayGenMask,
EShLangIntersectMask = (1 << EShLangIntersect),
EShLangIntersectNVMask = EShLangIntersectMask,
EShLangAnyHitMask = (1 << EShLangAnyHit),
EShLangAnyHitNVMask = EShLangAnyHitMask,
EShLangClosestHitMask = (1 << EShLangClosestHit),
EShLangClosestHitNVMask = EShLangClosestHitMask,
EShLangMissMask = (1 << EShLangMiss),
EShLangMissNVMask = EShLangMissMask,
EShLangCallableMask = (1 << EShLangCallable),
EShLangCallableNVMask = EShLangCallableMask,
EShLangTaskNVMask = (1 << EShLangTaskNV),
EShLangMeshNVMask = (1 << EShLangMeshNV),
LAST_ELEMENT_MARKER(EShLanguageMaskCount),
} EShLanguageMask;
namespace glslang {
class TType;
typedef enum {
EShSourceNone,
EShSourceGlsl, // GLSL, includes ESSL (OpenGL ES GLSL)
EShSourceHlsl, // HLSL
LAST_ELEMENT_MARKER(EShSourceCount),
} EShSource; // if EShLanguage were EShStage, this could be EShLanguage instead
typedef enum {
EShClientNone, // use when there is no client, e.g. for validation
EShClientVulkan, // as GLSL dialect, specifies KHR_vulkan_glsl extension
EShClientOpenGL, // as GLSL dialect, specifies ARB_gl_spirv extension
LAST_ELEMENT_MARKER(EShClientCount),
} EShClient;
typedef enum {
EShTargetNone,
EShTargetSpv, // SPIR-V (preferred spelling)
EshTargetSpv = EShTargetSpv, // legacy spelling
LAST_ELEMENT_MARKER(EShTargetCount),
} EShTargetLanguage;
typedef enum {
EShTargetVulkan_1_0 = (1 << 22), // Vulkan 1.0
EShTargetVulkan_1_1 = (1 << 22) | (1 << 12), // Vulkan 1.1
EShTargetVulkan_1_2 = (1 << 22) | (2 << 12), // Vulkan 1.2
EShTargetVulkan_1_3 = (1 << 22) | (3 << 12), // Vulkan 1.3
EShTargetOpenGL_450 = 450, // OpenGL
LAST_ELEMENT_MARKER(EShTargetClientVersionCount = 5),
} EShTargetClientVersion;
typedef EShTargetClientVersion EshTargetClientVersion;
typedef enum {
EShTargetSpv_1_0 = (1 << 16), // SPIR-V 1.0
EShTargetSpv_1_1 = (1 << 16) | (1 << 8), // SPIR-V 1.1
EShTargetSpv_1_2 = (1 << 16) | (2 << 8), // SPIR-V 1.2
EShTargetSpv_1_3 = (1 << 16) | (3 << 8), // SPIR-V 1.3
EShTargetSpv_1_4 = (1 << 16) | (4 << 8), // SPIR-V 1.4
EShTargetSpv_1_5 = (1 << 16) | (5 << 8), // SPIR-V 1.5
EShTargetSpv_1_6 = (1 << 16) | (6 << 8), // SPIR-V 1.6
LAST_ELEMENT_MARKER(EShTargetLanguageVersionCount = 7),
} EShTargetLanguageVersion;
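// Note (an addition, not in the original header): these are bit-packed
// version words. The Vulkan client values follow the VK_MAKE_API_VERSION
// layout (major << 22 | minor << 12) and the SPIR-V values follow the
// SPIR-V version-word layout (major << 16 | minor << 8), e.g.
//
//     static_assert(EShTargetVulkan_1_2 == ((1 << 22) | (2 << 12)), "Vulkan 1.2");
//     static_assert(EShTargetSpv_1_5    == ((1 << 16) | (5 << 8)),  "SPIR-V 1.5");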
struct TInputLanguage {
EShSource languageFamily; // redundant information with other input, this one overrides when not EShSourceNone
EShLanguage stage; // redundant information with other input, this one overrides when not EShSourceNone
EShClient dialect;
int dialectVersion; // version of client's language definition, not the client (when not EShClientNone)
bool vulkanRulesRelaxed;
};
struct TClient {
EShClient client;
EShTargetClientVersion version; // version of client itself (not the client's input dialect)
};
struct TTarget {
EShTargetLanguage language;
EShTargetLanguageVersion version; // version to target, if SPIR-V, defined by "word 1" of the SPIR-V header
bool hlslFunctionality1; // can target hlsl_functionality1 extension(s)
};
// All source/client/target versions and settings.
// Can override previous methods of setting, when items are set here.
// Expected to grow, as more are added, rather than growing parameter lists.
struct TEnvironment {
TInputLanguage input; // definition of the input language
TClient client; // what client is the overall compilation being done for?
TTarget target; // what to generate
};
GLSLANG_EXPORT const char* StageName(EShLanguage);
} // end namespace glslang
//
// Types of output the linker will create.
//
typedef enum {
EShExVertexFragment,
EShExFragment
} EShExecutable;
//
// Optimization level for the compiler.
//
typedef enum {
EShOptNoGeneration,
EShOptNone,
EShOptSimple, // Optimizations that can be done quickly
EShOptFull, // Optimizations that will take more time
LAST_ELEMENT_MARKER(EshOptLevelCount),
} EShOptimizationLevel;
//
// Texture and Sampler transformation mode.
//
typedef enum {
EShTexSampTransKeep, // keep textures and samplers as is (default)
EShTexSampTransUpgradeTextureRemoveSampler, // change a texture without an embedded sampler into a sampled texture and throw away all samplers
LAST_ELEMENT_MARKER(EShTexSampTransCount),
} EShTextureSamplerTransformMode;
//
// Message choices for what errors and warnings are given.
//
enum EShMessages : unsigned {
EShMsgDefault = 0, // default is to give all required errors and extra warnings
EShMsgRelaxedErrors = (1 << 0), // be liberal in accepting input
EShMsgSuppressWarnings = (1 << 1), // suppress all warnings, except those required by the specification
EShMsgAST = (1 << 2), // print the AST intermediate representation
EShMsgSpvRules = (1 << 3), // issue messages for SPIR-V generation
EShMsgVulkanRules = (1 << 4), // issue messages for Vulkan-requirements of GLSL for SPIR-V
EShMsgOnlyPreprocessor = (1 << 5), // only print out errors produced by the preprocessor
EShMsgReadHlsl = (1 << 6), // use HLSL parsing rules and semantics
EShMsgCascadingErrors = (1 << 7), // get cascading errors; risks error-recovery issues, instead of an early exit
EShMsgKeepUncalled = (1 << 8), // for testing, don't eliminate uncalled functions
EShMsgHlslOffsets = (1 << 9), // allow block offsets to follow HLSL rules instead of GLSL rules
EShMsgDebugInfo = (1 << 10), // save debug information
EShMsgHlslEnable16BitTypes = (1 << 11), // enable use of 16-bit types in SPIR-V for HLSL
EShMsgHlslLegalization = (1 << 12), // enable HLSL Legalization messages
EShMsgHlslDX9Compatible = (1 << 13), // enable HLSL DX9 compatible mode (for samplers and semantics)
EShMsgBuiltinSymbolTable = (1 << 14), // print the builtin symbol table
EShMsgEnhanced = (1 << 15), // enhanced message readability
LAST_ELEMENT_MARKER(EShMsgCount),
};
//
// Options for building reflection
//
typedef enum {
EShReflectionDefault = 0, // default is original behaviour before options were added
EShReflectionStrictArraySuffix = (1 << 0), // reflection will follow stricter rules for array-of-structs suffixes
EShReflectionBasicArraySuffix = (1 << 1), // arrays of basic types will be appended with [0] as in GL reflection
EShReflectionIntermediateIO = (1 << 2), // reflect inputs and outputs to program, even with no vertex shader
EShReflectionSeparateBuffers = (1 << 3), // buffer variables and buffer blocks are reflected separately
EShReflectionAllBlockVariables = (1 << 4), // reflect all variables in blocks, even if they are inactive
EShReflectionUnwrapIOBlocks = (1 << 5), // unwrap input/output blocks the same as with uniform blocks
EShReflectionAllIOVariables = (1 << 6), // reflect all input/output variables, even if they are inactive
EShReflectionSharedStd140SSBO = (1 << 7), // Apply std140/shared rules for ubo to ssbo
EShReflectionSharedStd140UBO = (1 << 8), // Apply std140/shared rules for ubo to ubo
LAST_ELEMENT_MARKER(EShReflectionCount),
} EShReflectionOptions;
//
// Build a table for bindings. This can be used for locating
// attributes, uniforms, globals, etc., as needed.
//
typedef struct {
const char* name;
int binding;
} ShBinding;
typedef struct {
int numBindings;
ShBinding* bindings; // array of bindings
} ShBindingTable;
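// Minimal sketch (not part of the original header) of filling in a binding
// table; the attribute names are hypothetical:
//
//     ShBinding fixed[] = { { "a_position", 0 }, { "a_normal", 1 } };
//     ShBindingTable table = { 2, fixed };
//     // then e.g. ShSetFixedAttributeBindings(compiler, &table); (declared below)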
//
// ShHandle held by but opaque to the driver. It is allocated,
// managed, and de-allocated by the compiler/linker. Its contents
// are defined by and used by the compiler and linker. For example,
// symbol table information and object code passed from the compiler
// to the linker can be stored where ShHandle points.
//
// If handle creation fails, 0 will be returned.
//
typedef void* ShHandle;
//
// Driver calls these to create and destroy compiler/linker
// objects.
//
GLSLANG_EXPORT ShHandle ShConstructCompiler(const EShLanguage, int debugOptions); // one per shader
GLSLANG_EXPORT ShHandle ShConstructLinker(const EShExecutable, int debugOptions); // one per shader pair
GLSLANG_EXPORT ShHandle ShConstructUniformMap(); // one per uniform namespace (currently entire program object)
GLSLANG_EXPORT void ShDestruct(ShHandle);
//
// The return value of ShCompile is boolean, non-zero indicating
// success.
//
// The info-log should be written by ShCompile into
// ShHandle, so it can answer future queries.
//
GLSLANG_EXPORT int ShCompile(
const ShHandle,
const char* const shaderStrings[],
const int numStrings,
const int* lengths,
const EShOptimizationLevel,
const TBuiltInResource *resources,
int debugOptions,
int defaultVersion = 110, // use 100 for ES environment, overridden by #version in shader
bool forwardCompatible = false, // give errors for use of deprecated features
EShMessages messages = EShMsgDefault // warnings and errors
);
GLSLANG_EXPORT int ShLinkExt(
const ShHandle, // linker object
const ShHandle h[], // compiler objects to link together
const int numHandles);
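// Usage sketch for the C interface above (an illustration, not part of the
// original header); "resources" is a caller-provided TBuiltInResource and
// "vertexSource" is the shader text:
//
//     ShInitialize();
//     ShHandle compiler = ShConstructCompiler(EShLangVertex, 0 /*debugOptions*/);
//     const char* sources[] = { vertexSource };
//     if (ShCompile(compiler, sources, 1, nullptr, EShOptNone, &resources, 0) == 0)
//         puts(ShGetInfoLog(compiler));   // declared below
//     ShDestruct(compiler);
//     ShFinalize();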
//
// ShSetEncryptionMethod is a place-holder for specifying
// how source code is encrypted.
//
GLSLANG_EXPORT void ShSetEncryptionMethod(ShHandle);
//
// All the following return 0 if the information is not
// available in the object passed down, or the object is bad.
//
GLSLANG_EXPORT const char* ShGetInfoLog(const ShHandle);
GLSLANG_EXPORT const void* ShGetExecutable(const ShHandle);
GLSLANG_EXPORT int ShSetVirtualAttributeBindings(const ShHandle, const ShBindingTable*); // to detect user aliasing
GLSLANG_EXPORT int ShSetFixedAttributeBindings(const ShHandle, const ShBindingTable*); // to force any physical mappings
//
// Tell the linker to never assign a vertex attribute to this list of physical attributes
//
GLSLANG_EXPORT int ShExcludeAttributes(const ShHandle, int *attributes, int count);
//
// Returns the location ID of the named uniform.
// Returns -1 if error.
//
GLSLANG_EXPORT int ShGetUniformLocation(const ShHandle uniformMap, const char* name);
#ifdef __cplusplus
} // end extern "C"
#endif
////////////////////////////////////////////////////////////////////////////////////////////
//
// Deferred-Lowering C++ Interface
// -----------------------------------
//
// Below is a new alternate C++ interface, which deprecates the above
// opaque handle-based interface.
//
// The below is further designed to handle multiple compilation units per stage, where
// the intermediate results, including the parse tree, are preserved until link time,
// rather than the above interface which is designed to have each compilation unit
// lowered at compile time. In the above model, linking occurs on the lowered results,
// whereas in this model intra-stage linking can occur at the parse tree
// (treeRoot in TIntermediate) level, and then a full stage can be lowered.
//
#include <list>
#include <string>
#include <utility>
class TCompiler;
class TInfoSink;
namespace glslang {
struct Version {
int major;
int minor;
int patch;
const char* flavor;
};
GLSLANG_EXPORT Version GetVersion();
GLSLANG_EXPORT const char* GetEsslVersionString();
GLSLANG_EXPORT const char* GetGlslVersionString();
GLSLANG_EXPORT int GetKhronosToolId();
class TIntermediate;
class TProgram;
class TPoolAllocator;
// Call this exactly once per process before using anything else
GLSLANG_EXPORT bool InitializeProcess();
// Call once per process to tear down everything
GLSLANG_EXPORT void FinalizeProcess();
// Resource type for IO resolver
enum TResourceType {
EResSampler,
EResTexture,
EResImage,
EResUbo,
EResSsbo,
EResUav,
EResCount
};
enum TBlockStorageClass
{
EbsUniform = 0,
EbsStorageBuffer,
EbsPushConstant,
EbsNone, // not a uniform or buffer variable
EbsCount,
};
// Make one TShader per shader that you will link into a program. Then
// - provide the shader through setStrings() or setStringsWithLengths()
// - optionally call setEnv*(), see below for more detail
// - optionally use setPreamble() to set a special shader string that will be
// processed before all others but won't affect the validity of #version
// - optionally call addProcesses() for each setting/transform,
// see comment for class TProcesses
// - call parse(): source language and target environment must be selected
// either by correct setting of EShMessages sent to parse(), or by
// explicitly calling setEnv*()
// - query the info logs
//
// N.B.: Does not yet support having the same TShader instance being linked into
// multiple programs.
//
// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
//
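// A minimal end-to-end sketch of the steps above (an illustration, not part
// of the original header); "Resources" is a caller-provided TBuiltInResource
// and "fragSource" is the shader text:
//
//     glslang::InitializeProcess();
//     glslang::TShader shader(EShLangFragment);
//     const char* strings[] = { fragSource };
//     shader.setStrings(strings, 1);
//     if (!shader.parse(&Resources, 110, false, EShMsgDefault))
//         puts(shader.getInfoLog());
//     // ... add to a TProgram and link (see TProgram below) ...
//     glslang::FinalizeProcess();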
class TShader {
public:
GLSLANG_EXPORT explicit TShader(EShLanguage);
GLSLANG_EXPORT virtual ~TShader();
GLSLANG_EXPORT void setStrings(const char* const* s, int n);
GLSLANG_EXPORT void setStringsWithLengths(
const char* const* s, const int* l, int n);
GLSLANG_EXPORT void setStringsWithLengthsAndNames(
const char* const* s, const int* l, const char* const* names, int n);
void setPreamble(const char* s) { preamble = s; }
GLSLANG_EXPORT void setEntryPoint(const char* entryPoint);
GLSLANG_EXPORT void setSourceEntryPoint(const char* sourceEntryPointName);
GLSLANG_EXPORT void addProcesses(const std::vector<std::string>&);
GLSLANG_EXPORT void setUniqueId(unsigned long long id);
GLSLANG_EXPORT void setOverrideVersion(int version);
// IO resolver binding data: see comments in ShaderLang.cpp
GLSLANG_EXPORT void setShiftBinding(TResourceType res, unsigned int base);
GLSLANG_EXPORT void setShiftSamplerBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftTextureBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftImageBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftUboBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftUavBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftCbufferBinding(unsigned int base); // synonym for setShiftUboBinding
GLSLANG_EXPORT void setShiftSsboBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set);
GLSLANG_EXPORT void setResourceSetBinding(const std::vector<std::string>& base);
GLSLANG_EXPORT void setAutoMapBindings(bool map);
GLSLANG_EXPORT void setAutoMapLocations(bool map);
GLSLANG_EXPORT void addUniformLocationOverride(const char* name, int loc);
GLSLANG_EXPORT void setUniformLocationBase(int base);
GLSLANG_EXPORT void setInvertY(bool invert);
GLSLANG_EXPORT void setDxPositionW(bool dxPosW);
GLSLANG_EXPORT void setEnhancedMsgs();
#ifdef ENABLE_HLSL
GLSLANG_EXPORT void setHlslIoMapping(bool hlslIoMap);
GLSLANG_EXPORT void setFlattenUniformArrays(bool flatten);
#endif
GLSLANG_EXPORT void setNoStorageFormat(bool useUnknownFormat);
GLSLANG_EXPORT void setNanMinMaxClamp(bool nanMinMaxClamp);
GLSLANG_EXPORT void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode);
GLSLANG_EXPORT void addBlockStorageOverride(const char* nameStr, glslang::TBlockStorageClass backing);
GLSLANG_EXPORT void setGlobalUniformBlockName(const char* name);
GLSLANG_EXPORT void setAtomicCounterBlockName(const char* name);
GLSLANG_EXPORT void setGlobalUniformSet(unsigned int set);
GLSLANG_EXPORT void setGlobalUniformBinding(unsigned int binding);
GLSLANG_EXPORT void setAtomicCounterBlockSet(unsigned int set);
GLSLANG_EXPORT void setAtomicCounterBlockBinding(unsigned int binding);
// For setting up the environment (cleared to nothingness in the constructor).
// These must be called so that parsing is done for the right source language and
// target environment, either indirectly through TranslateEnvironment() based on
// EShMessages et. al., or directly by the user.
//
// setEnvInput: The input source language and stage. If generating code for a
// specific client, the input client semantics to use and the
// version of that client's input semantics to use, otherwise
// use EShClientNone and version of 0, e.g. for validation mode.
// Note 'version' does not describe the target environment,
// just the version of the source dialect to compile under.
// For example, to choose the Vulkan dialect of GLSL defined by
// version 100 of the KHR_vulkan_glsl extension: lang = EShSourceGlsl,
// dialect = EShClientVulkan, and version = 100.
//
// See the definitions of TEnvironment, EShSource, EShLanguage,
// and EShClient for choices and more detail.
//
// setEnvClient: The client that will be hosting the execution, and its version.
// Note 'version' is not the version of the languages involved, but
// the version of the client environment.
// Use EShClientNone and version of 0 if there is no client, e.g.
// for validation mode.
//
// See EShTargetClientVersion for choices.
//
// setEnvTarget: The language to translate to when generating code, and that
// language's version.
// Use EShTargetNone and version of 0 if there is no client, e.g.
// for validation mode.
//
void setEnvInput(EShSource lang, EShLanguage envStage, EShClient client, int version)
{
environment.input.languageFamily = lang;
environment.input.stage = envStage;
environment.input.dialect = client;
environment.input.dialectVersion = version;
}
void setEnvClient(EShClient client, EShTargetClientVersion version)
{
environment.client.client = client;
environment.client.version = version;
}
void setEnvTarget(EShTargetLanguage lang, EShTargetLanguageVersion version)
{
environment.target.language = lang;
environment.target.version = version;
}
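// Example (a sketch, not part of the original header) of selecting the
// Vulkan dialect of GLSL and targeting SPIR-V 1.5 explicitly, instead of
// relying on EShMessages flags:
//
//     shader.setEnvInput(EShSourceGlsl, EShLangFragment, EShClientVulkan, 100);
//     shader.setEnvClient(EShClientVulkan, EShTargetVulkan_1_2);
//     shader.setEnvTarget(EShTargetSpv, EShTargetSpv_1_5);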
void getStrings(const char* const* &s, int& n) { s = strings; n = numStrings; }
#ifdef ENABLE_HLSL
void setEnvTargetHlslFunctionality1() { environment.target.hlslFunctionality1 = true; }
bool getEnvTargetHlslFunctionality1() const { return environment.target.hlslFunctionality1; }
#else
bool getEnvTargetHlslFunctionality1() const { return false; }
#endif
void setEnvInputVulkanRulesRelaxed() { environment.input.vulkanRulesRelaxed = true; }
bool getEnvInputVulkanRulesRelaxed() const { return environment.input.vulkanRulesRelaxed; }
// Interface to #include handlers.
//
// To support #include, a client of Glslang does the following:
// 1. Call setStringsWithNames to set the source strings and associated
// names. For example, the names could be the names of the files
// containing the shader sources.
// 2. Call parse with an Includer.
//
// When the Glslang parser encounters an #include directive, it calls
// the Includer's include method with the requested include name
// together with the current string name. The returned IncludeResult
// contains the fully resolved name of the included source, together
// with the source text that should replace the #include directive
// in the source stream. After parsing that source, Glslang will
// release the IncludeResult object.
class Includer {
public:
// An IncludeResult contains the resolved name and content of a source
// inclusion.
struct IncludeResult {
IncludeResult(const std::string& headerName, const char* const headerData, const size_t headerLength, void* userData) :
headerName(headerName), headerData(headerData), headerLength(headerLength), userData(userData) { }
// For a successful inclusion, the fully resolved name of the requested
// include. For example, in a file system-based includer, full resolution
// should convert a relative path name into an absolute path name.
// For a failed inclusion, this is an empty string.
const std::string headerName;
// The content and byte length of the requested inclusion. The
// Includer producing this IncludeResult retains ownership of the
// storage.
// For a failed inclusion, the header
// field points to a string containing error details.
const char* const headerData;
const size_t headerLength;
// Include resolver's context.
void* userData;
protected:
IncludeResult& operator=(const IncludeResult&);
IncludeResult();
};
// For both include methods below:
//
// Resolves an inclusion request by name, current source name,
// and include depth.
// On success, returns an IncludeResult containing the resolved name
// and content of the include.
// On failure, returns a nullptr, or an IncludeResult
// with an empty string for the headerName and error details in the
// header field.
// The Includer retains ownership of the contents
// of the returned IncludeResult value, and those contents must
// remain valid until the releaseInclude method is called on that
// IncludeResult object.
//
// Note "local" vs. "system" is not an "either/or": "local" is an
// extra thing to do over "system". Both might get called, as per
// the C++ specification.
// For the "system" or <>-style includes; search the "system" paths.
virtual IncludeResult* includeSystem(const char* /*headerName*/,
const char* /*includerName*/,
size_t /*inclusionDepth*/) { return nullptr; }
// For the "local"-only aspect of a "" include. Should not search in the
// "system" paths, because on returning a failure, the parser will
// call includeSystem() to look in the "system" locations.
virtual IncludeResult* includeLocal(const char* /*headerName*/,
const char* /*includerName*/,
size_t /*inclusionDepth*/) { return nullptr; }
// Signals that the parser will no longer use the contents of the
// specified IncludeResult.
virtual void releaseInclude(IncludeResult*) = 0;
virtual ~Includer() {}
};
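// Sketch of a custom includer (an illustration, not part of the original
// header): resolve ""-style includes from an in-memory map; the map and its
// contents are hypothetical (needs <map> and <string>).
//
//     class MapIncluder : public glslang::TShader::Includer {
//     public:
//         IncludeResult* includeLocal(const char* name, const char*, size_t) override
//         {
//             auto it = files.find(name);
//             if (it == files.end())
//                 return nullptr;  // let the parser try includeSystem() next
//             return new IncludeResult(name, it->second.data(), it->second.size(), nullptr);
//         }
//         void releaseInclude(IncludeResult* result) override { delete result; }
//         std::map<std::string, std::string> files; // name -> source text
//     };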
// Fail all Includer searches
class ForbidIncluder : public Includer {
public:
virtual void releaseInclude(IncludeResult*) override { }
};
GLSLANG_EXPORT bool parse(
const TBuiltInResource*, int defaultVersion, EProfile defaultProfile,
bool forceDefaultVersionAndProfile, bool forwardCompatible,
EShMessages, Includer&);
bool parse(const TBuiltInResource* res, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
bool forwardCompatible, EShMessages messages)
{
TShader::ForbidIncluder includer;
return parse(res, defaultVersion, defaultProfile, forceDefaultVersionAndProfile, forwardCompatible, messages, includer);
}
// Equivalent to parse() without a default profile and without forcing defaults.
bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages)
{
return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages);
}
bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages,
Includer& includer)
{
return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages, includer);
}
// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
// is not an officially supported or fully working path.
GLSLANG_EXPORT bool preprocess(
const TBuiltInResource* builtInResources, int defaultVersion,
EProfile defaultProfile, bool forceDefaultVersionAndProfile,
bool forwardCompatible, EShMessages message, std::string* outputString,
Includer& includer);
GLSLANG_EXPORT const char* getInfoLog();
GLSLANG_EXPORT const char* getInfoDebugLog();
EShLanguage getStage() const { return stage; }
TIntermediate* getIntermediate() const { return intermediate; }
protected:
TPoolAllocator* pool;
EShLanguage stage;
TCompiler* compiler;
TIntermediate* intermediate;
TInfoSink* infoSink;
// strings and lengths follow the standard for glShaderSource:
// strings is an array of numStrings pointers to string data.
// lengths can be null, but if not it is an array of numStrings
// integers containing the length of the associated strings.
// if lengths is null or lengths[n] < 0 the associated strings[n] is
// assumed to be null-terminated.
// stringNames is the optional names for all the strings. If stringNames
// is null, then none of the strings has a name. If a certain element in
// stringNames is null, then the corresponding string does not have a name.
const char* const* strings; // explicit code to compile, see previous comment
const int* lengths;
const char* const* stringNames;
int numStrings; // size of the above arrays
const char* preamble; // string of implicit code to compile before the explicitly provided code
// a function in the source string can be renamed FROM this TO the name given in setEntryPoint.
std::string sourceEntryPointName;
// overrides #version in shader source or default version if #version isn't present
int overrideVersion;
TEnvironment environment;
friend class TProgram;
private:
TShader& operator=(TShader&);
};
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
//
// A reflection database and its interface, consistent with the OpenGL API reflection queries.
//
// Data needed for just a single object at the granularity exchanged by the reflection API
class TObjectReflection {
public:
GLSLANG_EXPORT TObjectReflection(const std::string& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex);
const TType* getType() const { return type; }
GLSLANG_EXPORT int getBinding() const;
GLSLANG_EXPORT void dump() const;
static TObjectReflection badReflection() { return TObjectReflection(); }
std::string name;
int offset;
int glDefineType;
int size; // data size in bytes for a block, array size for a (non-block) object that's an array
int index;
int counterIndex;
int numMembers;
int arrayStride; // stride of an array variable
int topLevelArraySize; // size of the top-level variable in a storage buffer member
int topLevelArrayStride; // stride of the top-level variable in a storage buffer member
EShLanguageMask stages;
protected:
TObjectReflection()
: offset(-1), glDefineType(-1), size(-1), index(-1), counterIndex(-1), numMembers(-1), arrayStride(0),
topLevelArrayStride(0), stages(EShLanguageMask(0)), type(nullptr)
{
}
const TType* type;
};
class TReflection;
class TIoMapper;
struct TVarEntryInfo;
// Allows the binding layout to be customized after linking.
// All used uniform variables will invoke at least validateBinding.
// If validateBinding returns true, then resolveBinding, resolveSet,
// and resolveUniformLocation are invoked to resolve the binding,
// the descriptor set index, and the location respectively.
//
// Invocations happen in a particular order:
// 1) all shader inputs
// 2) all shader outputs
// 3) all uniforms with binding and set already defined
// 4) all uniforms with binding but no set defined
// 5) all uniforms with set but no binding defined
// 6) all uniforms with no binding and no set defined
//
// mapIO will use this resolver in two phases. The first
// phase is a notification phase, calling the corresponding
// notify callbacks; this phase ends with a call to endNotifications.
// Phase two starts directly after the call to endNotifications
// and calls all other callbacks to validate and to get the
// bindings, sets, locations, component and color indices.
//
// NOTE: limit checks are still applied to bindings and sets,
// and may result in an error.
class TIoMapResolver
{
public:
virtual ~TIoMapResolver() {}
// Should return true if the resulting/current binding would be okay.
// Basic idea is to do aliasing binding checks with this.
virtual bool validateBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current binding should be overridden.
// Return -1 if the current binding (including no binding) should be kept.
virtual int resolveBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current set should be overridden.
// Return -1 if the current set (including no set) should be kept.
virtual int resolveSet(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveUniformLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return true if the resulting/current setup would be okay.
// Basic idea is to do aliasing checks and reject invalid semantic names.
virtual bool validateInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current component index should be overridden.
// Return -1 if the current component index (including no index) should be kept.
virtual int resolveInOutComponent(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current color index should be overridden.
// Return -1 if the current color index (including no index) should be kept.
virtual int resolveInOutIndex(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Notification of a uniform variable
virtual void notifyBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Notification of an in or out variable
virtual void notifyInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Called by mapIO when it starts its notify pass for the given stage
virtual void beginNotifications(EShLanguage stage) = 0;
// Called by mapIO when it has finished the notify pass
virtual void endNotifications(EShLanguage stage) = 0;
// Called by mapIO when it starts its resolve pass for the given stage
virtual void beginResolve(EShLanguage stage) = 0;
// Called by mapIO when it has finished the resolve pass
virtual void endResolve(EShLanguage stage) = 0;
// Called by mapIO when it starts its symbol collect for the given stage
virtual void beginCollect(EShLanguage stage) = 0;
// Called by mapIO when it has finished the symbol collect
virtual void endCollect(EShLanguage stage) = 0;
// Called by TSlotCollector to resolve storage locations or bindings
virtual void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
// Called by TSlotCollector to resolve resource locations or bindings
virtual void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
// Called by mapIO.addStage to set the shader stage mask, marking a stage as added to this pipeline
virtual void addStage(EShLanguage stage, TIntermediate& stageIntermediate) = 0;
};
#endif // !GLSLANG_WEB && !GLSLANG_ANGLE
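// Editor's illustrative sketch (not part of the upstream header): a minimal
// pass-through resolver following the contract documented above -- the
// resolve* callbacks return -1 to keep the current binding/set/location and
// >= 0 to override it, while the validate* callbacks return true to accept
// the current state. The class name TKeepEverythingResolver is hypothetical.
// Guarded by #if 0 so it has no effect on compilation.
#if 0
class TKeepEverythingResolver : public TIoMapResolver {
public:
    bool validateBinding(EShLanguage, TVarEntryInfo&) override { return true; }   // accept current binding
    int  resolveBinding(EShLanguage, TVarEntryInfo&) override { return -1; }       // keep current binding
    int  resolveSet(EShLanguage, TVarEntryInfo&) override { return -1; }           // keep current set
    int  resolveUniformLocation(EShLanguage, TVarEntryInfo&) override { return -1; }
    bool validateInOut(EShLanguage, TVarEntryInfo&) override { return true; }
    int  resolveInOutLocation(EShLanguage, TVarEntryInfo&) override { return -1; }
    int  resolveInOutComponent(EShLanguage, TVarEntryInfo&) override { return -1; }
    int  resolveInOutIndex(EShLanguage, TVarEntryInfo&) override { return -1; }
    void notifyBinding(EShLanguage, TVarEntryInfo&) override {}                    // phase-one notifications
    void notifyInOut(EShLanguage, TVarEntryInfo&) override {}
    void beginNotifications(EShLanguage) override {}
    void endNotifications(EShLanguage) override {}
    void beginResolve(EShLanguage) override {}
    void endResolve(EShLanguage) override {}
    void beginCollect(EShLanguage) override {}
    void endCollect(EShLanguage) override {}
    void reserverStorageSlot(TVarEntryInfo&, TInfoSink&) override {}
    void reserverResourceSlot(TVarEntryInfo&, TInfoSink&) override {}
    void addStage(EShLanguage, TIntermediate&) override {}
};
#endif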
// Make one TProgram per set of shaders that will get linked together. Add all
// the shaders that are to be linked together. After calling shader.parse()
// for all shaders, call link().
//
// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
//
class TProgram {
public:
GLSLANG_EXPORT TProgram();
GLSLANG_EXPORT virtual ~TProgram();
void addShader(TShader* shader) { stages[shader->stage].push_back(shader); }
std::list<TShader*>& getShaders(EShLanguage stage) { return stages[stage]; }
// Link Validation interface
GLSLANG_EXPORT bool link(EShMessages);
GLSLANG_EXPORT const char* getInfoLog();
GLSLANG_EXPORT const char* getInfoDebugLog();
TIntermediate* getIntermediate(EShLanguage stage) const { return intermediate[stage]; }
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
// Reflection Interface
// call first, to do liveness analysis, index mapping, etc.; returns false on failure
GLSLANG_EXPORT bool buildReflection(int opts = EShReflectionDefault);
GLSLANG_EXPORT unsigned getLocalSize(int dim) const; // return dim'th local size
GLSLANG_EXPORT int getReflectionIndex(const char *name) const;
GLSLANG_EXPORT int getReflectionPipeIOIndex(const char* name, const bool inOrOut) const;
GLSLANG_EXPORT int getNumUniformVariables() const;
GLSLANG_EXPORT const TObjectReflection& getUniform(int index) const;
GLSLANG_EXPORT int getNumUniformBlocks() const;
GLSLANG_EXPORT const TObjectReflection& getUniformBlock(int index) const;
GLSLANG_EXPORT int getNumPipeInputs() const;
GLSLANG_EXPORT const TObjectReflection& getPipeInput(int index) const;
GLSLANG_EXPORT int getNumPipeOutputs() const;
GLSLANG_EXPORT const TObjectReflection& getPipeOutput(int index) const;
GLSLANG_EXPORT int getNumBufferVariables() const;
GLSLANG_EXPORT const TObjectReflection& getBufferVariable(int index) const;
GLSLANG_EXPORT int getNumBufferBlocks() const;
GLSLANG_EXPORT const TObjectReflection& getBufferBlock(int index) const;
GLSLANG_EXPORT int getNumAtomicCounters() const;
GLSLANG_EXPORT const TObjectReflection& getAtomicCounter(int index) const;
// Legacy Reflection Interface - expressed in terms of above interface
// can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS)
int getNumLiveUniformVariables() const { return getNumUniformVariables(); }
// can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS)
int getNumLiveUniformBlocks() const { return getNumUniformBlocks(); }
// can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES)
int getNumLiveAttributes() const { return getNumPipeInputs(); }
// can be used for glGetUniformIndices()
int getUniformIndex(const char *name) const { return getReflectionIndex(name); }
int getPipeIOIndex(const char *name, const bool inOrOut) const
{ return getReflectionPipeIOIndex(name, inOrOut); }
// can be used for "name" part of glGetActiveUniform()
const char *getUniformName(int index) const { return getUniform(index).name.c_str(); }
// returns the binding number
int getUniformBinding(int index) const { return getUniform(index).getBinding(); }
// returns the shader stages where a uniform is present
EShLanguageMask getUniformStages(int index) const { return getUniform(index).stages; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX)
int getUniformBlockIndex(int index) const { return getUniform(index).index; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE)
int getUniformType(int index) const { return getUniform(index).glDefineType; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET)
int getUniformBufferOffset(int index) const { return getUniform(index).offset; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE)
int getUniformArraySize(int index) const { return getUniform(index).size; }
// returns a TType*
const TType *getUniformTType(int index) const { return getUniform(index).getType(); }
// can be used for glGetActiveUniformBlockName()
const char *getUniformBlockName(int index) const { return getUniformBlock(index).name.c_str(); }
// can be used for glGetActiveUniformBlockiv(UNIFORM_BLOCK_DATA_SIZE)
int getUniformBlockSize(int index) const { return getUniformBlock(index).size; }
// returns the block binding number
int getUniformBlockBinding(int index) const { return getUniformBlock(index).getBinding(); }
// returns block index of associated counter.
int getUniformBlockCounterIndex(int index) const { return getUniformBlock(index).counterIndex; }
// returns a TType*
const TType *getUniformBlockTType(int index) const { return getUniformBlock(index).getType(); }
// can be used for glGetActiveAttrib()
const char *getAttributeName(int index) const { return getPipeInput(index).name.c_str(); }
// can be used for glGetActiveAttrib()
int getAttributeType(int index) const { return getPipeInput(index).glDefineType; }
// returns a TType*
const TType *getAttributeTType(int index) const { return getPipeInput(index).getType(); }
GLSLANG_EXPORT void dumpReflection();
// I/O mapping: apply base offsets and map live unbound variables
// If a resolver is not provided, it uses the previous approach
// and respects auto assignment and offsets.
GLSLANG_EXPORT bool mapIO(TIoMapResolver* pResolver = nullptr, TIoMapper* pIoMapper = nullptr);
#endif // !GLSLANG_WEB && !GLSLANG_ANGLE
protected:
GLSLANG_EXPORT bool linkStage(EShLanguage, EShMessages);
GLSLANG_EXPORT bool crossStageCheck(EShMessages);
TPoolAllocator* pool;
std::list<TShader*> stages[EShLangCount];
TIntermediate* intermediate[EShLangCount];
bool newedIntermediate[EShLangCount]; // track which intermediates were "new" versus reusing a singleton unit in a stage
TInfoSink* infoSink;
#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
TReflection* reflection;
#endif
bool linked;
private:
TProgram(TProgram&);
TProgram& operator=(TProgram&);
};
} // end namespace glslang
#endif // _COMPILER_INTERFACE_INCLUDED_
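// Editor's illustrative sketch (not part of the upstream header): the typical
// parse/link/reflect flow described in the TProgram comment above. The helper
// name CompileAndReflect, the "vertexSource" string, and the "Resources"
// limits object are assumptions supplied by the caller; error handling is
// reduced to bools. Guarded by #if 0 so it has no effect on compilation.
#if 0
bool CompileAndReflect(const char* vertexSource, const TBuiltInResource& Resources)
{
    glslang::InitializeProcess();                        // once per process

    glslang::TShader vs(EShLangVertex);
    vs.setStrings(&vertexSource, 1);
    if (!vs.parse(&Resources, 110, false, EShMsgDefault))
        return false;                                    // details in vs.getInfoLog()

    glslang::TProgram program;                           // declared after vs, so it destructs
    program.addShader(&vs);                              // *before* the shader, as required
    if (!program.link(EShMsgDefault))
        return false;                                    // details in program.getInfoLog()

    program.mapIO();                                     // optional: remap bindings/locations

    if (program.buildReflection()) {                     // liveness analysis + index mapping
        for (int i = 0; i < program.getNumUniformVariables(); ++i) {
            const char* name = program.getUniformName(i); // e.g. feed a descriptor-set builder
            (void)name;
        }
    }

    glslang::FinalizeProcess();
    return true;
}
#endif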

View File

@ -0,0 +1,108 @@
/*
** Copyright (c) 2014-2016 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextAMD_H
#define GLSLextAMD_H
static const int GLSLextAMDVersion = 100;
static const int GLSLextAMDRevision = 7;
// SPV_AMD_shader_ballot
static const char* const E_SPV_AMD_shader_ballot = "SPV_AMD_shader_ballot";
enum ShaderBallotAMD {
ShaderBallotBadAMD = 0, // Don't use
SwizzleInvocationsAMD = 1,
SwizzleInvocationsMaskedAMD = 2,
WriteInvocationAMD = 3,
MbcntAMD = 4,
ShaderBallotCountAMD
};
// SPV_AMD_shader_trinary_minmax
static const char* const E_SPV_AMD_shader_trinary_minmax = "SPV_AMD_shader_trinary_minmax";
enum ShaderTrinaryMinMaxAMD {
ShaderTrinaryMinMaxBadAMD = 0, // Don't use
FMin3AMD = 1,
UMin3AMD = 2,
SMin3AMD = 3,
FMax3AMD = 4,
UMax3AMD = 5,
SMax3AMD = 6,
FMid3AMD = 7,
UMid3AMD = 8,
SMid3AMD = 9,
ShaderTrinaryMinMaxCountAMD
};
// SPV_AMD_shader_explicit_vertex_parameter
static const char* const E_SPV_AMD_shader_explicit_vertex_parameter = "SPV_AMD_shader_explicit_vertex_parameter";
enum ShaderExplicitVertexParameterAMD {
ShaderExplicitVertexParameterBadAMD = 0, // Don't use
InterpolateAtVertexAMD = 1,
ShaderExplicitVertexParameterCountAMD
};
// SPV_AMD_gcn_shader
static const char* const E_SPV_AMD_gcn_shader = "SPV_AMD_gcn_shader";
enum GcnShaderAMD {
GcnShaderBadAMD = 0, // Don't use
CubeFaceIndexAMD = 1,
CubeFaceCoordAMD = 2,
TimeAMD = 3,
GcnShaderCountAMD
};
// SPV_AMD_gpu_shader_half_float
static const char* const E_SPV_AMD_gpu_shader_half_float = "SPV_AMD_gpu_shader_half_float";
// SPV_AMD_texture_gather_bias_lod
static const char* const E_SPV_AMD_texture_gather_bias_lod = "SPV_AMD_texture_gather_bias_lod";
// SPV_AMD_gpu_shader_int16
static const char* const E_SPV_AMD_gpu_shader_int16 = "SPV_AMD_gpu_shader_int16";
// SPV_AMD_shader_image_load_store_lod
static const char* const E_SPV_AMD_shader_image_load_store_lod = "SPV_AMD_shader_image_load_store_lod";
// SPV_AMD_shader_fragment_mask
static const char* const E_SPV_AMD_shader_fragment_mask = "SPV_AMD_shader_fragment_mask";
// SPV_AMD_gpu_shader_half_float_fetch
static const char* const E_SPV_AMD_gpu_shader_half_float_fetch = "SPV_AMD_gpu_shader_half_float_fetch";
#endif // #ifndef GLSLextAMD_H

View File

@ -0,0 +1,43 @@
/*
** Copyright (c) 2014-2016 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextEXT_H
#define GLSLextEXT_H
static const int GLSLextEXTVersion = 100;
static const int GLSLextEXTRevision = 2;
static const char* const E_SPV_EXT_shader_stencil_export = "SPV_EXT_shader_stencil_export";
static const char* const E_SPV_EXT_shader_viewport_index_layer = "SPV_EXT_shader_viewport_index_layer";
static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fully_covered";
static const char* const E_SPV_EXT_fragment_invocation_density = "SPV_EXT_fragment_invocation_density";
static const char* const E_SPV_EXT_demote_to_helper_invocation = "SPV_EXT_demote_to_helper_invocation";
static const char* const E_SPV_EXT_shader_atomic_float_add = "SPV_EXT_shader_atomic_float_add";
static const char* const E_SPV_EXT_shader_atomic_float16_add = "SPV_EXT_shader_atomic_float16_add";
static const char* const E_SPV_EXT_shader_atomic_float_min_max = "SPV_EXT_shader_atomic_float_min_max";
static const char* const E_SPV_EXT_shader_image_int64 = "SPV_EXT_shader_image_int64";
#endif // #ifndef GLSLextEXT_H

View File

@ -0,0 +1,56 @@
/*
** Copyright (c) 2014-2020 The Khronos Group Inc.
** Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextKHR_H
#define GLSLextKHR_H
static const int GLSLextKHRVersion = 100;
static const int GLSLextKHRRevision = 2;
static const char* const E_SPV_KHR_shader_ballot = "SPV_KHR_shader_ballot";
static const char* const E_SPV_KHR_subgroup_vote = "SPV_KHR_subgroup_vote";
static const char* const E_SPV_KHR_device_group = "SPV_KHR_device_group";
static const char* const E_SPV_KHR_multiview = "SPV_KHR_multiview";
static const char* const E_SPV_KHR_shader_draw_parameters = "SPV_KHR_shader_draw_parameters";
static const char* const E_SPV_KHR_16bit_storage = "SPV_KHR_16bit_storage";
static const char* const E_SPV_KHR_8bit_storage = "SPV_KHR_8bit_storage";
static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_storage_buffer_storage_class";
static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage";
static const char* const E_SPV_KHR_vulkan_memory_model = "SPV_KHR_vulkan_memory_model";
static const char* const E_SPV_EXT_physical_storage_buffer = "SPV_EXT_physical_storage_buffer";
static const char* const E_SPV_KHR_physical_storage_buffer = "SPV_KHR_physical_storage_buffer";
static const char* const E_SPV_EXT_fragment_shader_interlock = "SPV_EXT_fragment_shader_interlock";
static const char* const E_SPV_KHR_shader_clock = "SPV_KHR_shader_clock";
static const char* const E_SPV_KHR_non_semantic_info = "SPV_KHR_non_semantic_info";
static const char* const E_SPV_KHR_ray_tracing = "SPV_KHR_ray_tracing";
static const char* const E_SPV_KHR_ray_query = "SPV_KHR_ray_query";
static const char* const E_SPV_KHR_fragment_shading_rate = "SPV_KHR_fragment_shading_rate";
static const char* const E_SPV_KHR_terminate_invocation = "SPV_KHR_terminate_invocation";
static const char* const E_SPV_KHR_workgroup_memory_explicit_layout = "SPV_KHR_workgroup_memory_explicit_layout";
static const char* const E_SPV_KHR_subgroup_uniform_control_flow = "SPV_KHR_subgroup_uniform_control_flow";
#endif // #ifndef GLSLextKHR_H

View File

@ -0,0 +1,84 @@
/*
** Copyright (c) 2014-2017 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextNV_H
#define GLSLextNV_H
enum BuiltIn;
enum Decoration;
enum Op;
enum Capability;
static const int GLSLextNVVersion = 100;
static const int GLSLextNVRevision = 11;
//SPV_NV_sample_mask_override_coverage
const char* const E_SPV_NV_sample_mask_override_coverage = "SPV_NV_sample_mask_override_coverage";
//SPV_NV_geometry_shader_passthrough
const char* const E_SPV_NV_geometry_shader_passthrough = "SPV_NV_geometry_shader_passthrough";
//SPV_NV_viewport_array2
const char* const E_SPV_NV_viewport_array2 = "SPV_NV_viewport_array2";
const char* const E_ARB_shader_viewport_layer_array = "SPV_ARB_shader_viewport_layer_array";
//SPV_NV_stereo_view_rendering
const char* const E_SPV_NV_stereo_view_rendering = "SPV_NV_stereo_view_rendering";
//SPV_NVX_multiview_per_view_attributes
const char* const E_SPV_NVX_multiview_per_view_attributes = "SPV_NVX_multiview_per_view_attributes";
//SPV_NV_shader_subgroup_partitioned
const char* const E_SPV_NV_shader_subgroup_partitioned = "SPV_NV_shader_subgroup_partitioned";
//SPV_NV_fragment_shader_barycentric
const char* const E_SPV_NV_fragment_shader_barycentric = "SPV_NV_fragment_shader_barycentric";
//SPV_NV_compute_shader_derivatives
const char* const E_SPV_NV_compute_shader_derivatives = "SPV_NV_compute_shader_derivatives";
//SPV_NV_shader_image_footprint
const char* const E_SPV_NV_shader_image_footprint = "SPV_NV_shader_image_footprint";
//SPV_NV_mesh_shader
const char* const E_SPV_NV_mesh_shader = "SPV_NV_mesh_shader";
//SPV_NV_ray_tracing
const char* const E_SPV_NV_ray_tracing = "SPV_NV_ray_tracing";
//SPV_NV_ray_tracing_motion_blur
const char* const E_SPV_NV_ray_tracing_motion_blur = "SPV_NV_ray_tracing_motion_blur";
//SPV_NV_shading_rate
const char* const E_SPV_NV_shading_rate = "SPV_NV_shading_rate";
//SPV_NV_cooperative_matrix
const char* const E_SPV_NV_cooperative_matrix = "SPV_NV_cooperative_matrix";
//SPV_NV_shader_sm_builtins
const char* const E_SPV_NV_shader_sm_builtins = "SPV_NV_shader_sm_builtins";
#endif // #ifndef GLSLextNV_H
