diff --git a/firedrake/formmanipulation.py b/firedrake/formmanipulation.py
index 35a6789107..394fdcca95 100644
--- a/firedrake/formmanipulation.py
+++ b/firedrake/formmanipulation.py
@@ -124,7 +124,7 @@ def argument(self, o):
                     args += [a_[j] for j in numpy.ndindex(a_.ufl_shape)]
             else:
                 args += [Zero()
-                         for j in numpy.ndindex(V_is[i].value_shape)]
+                         for j in numpy.ndindex(V_is[i].block_size)]
 
         return self._arg_cache.setdefault(o, as_vector(args))
 
diff --git a/firedrake/functionspaceimpl.py b/firedrake/functionspaceimpl.py
index 22bc4735ff..074606c124 100644
--- a/firedrake/functionspaceimpl.py
+++ b/firedrake/functionspaceimpl.py
@@ -176,7 +176,7 @@ def split(self):
     def _components(self):
         if len(self) == 1:
             return tuple(type(self).create(self.topological.sub(i), self.mesh())
-                         for i in range(numpy.prod(self.shape)))
+                         for i in range(self.block_size))
         else:
             return self.subfunctions
 
@@ -494,6 +494,9 @@ def __init__(self, mesh, element, name=None):
         self._ufl_function_space = ufl.FunctionSpace(mesh.ufl_mesh(), element, label=self._label)
         self._mesh = mesh
 
+        self.value_size = self._ufl_function_space.value_size
+        r"""The number of scalar components of this :class:`FunctionSpace`."""
+
         self.rank = len(self.shape)
         r"""The rank of this :class:`FunctionSpace`. Spaces where the
         element is scalar-valued (or intrinsically vector-valued) have
@@ -502,7 +505,7 @@ def __init__(self, mesh, element, name=None):
         the number of components of their
         :attr:`finat.ufl.finiteelementbase.FiniteElementBase.value_shape`."""
 
-        self.value_size = int(numpy.prod(self.shape, dtype=int))
+        self.block_size = int(numpy.prod(self.shape, dtype=int))
         r"""The total number of degrees of freedom at each function space node."""
 
         self.name = name
@@ -651,7 +654,7 @@ def __getitem__(self, i):
 
     @utils.cached_property
     def _components(self):
-        return tuple(ComponentFunctionSpace(self, i) for i in range(numpy.prod(self.shape)))
+        return tuple(ComponentFunctionSpace(self, i) for i in range(self.block_size))
 
     def sub(self, i):
         r"""Return a view into the ith component."""
@@ -681,7 +684,7 @@ def node_count(self):
     def dof_count(self):
         r"""The number of degrees of freedom (includes halo dofs) of this
         function space on this process. Cf. :attr:`FunctionSpace.node_count` ."""
-        return self.node_count*self.value_size
+        return self.node_count*self.block_size
 
     def dim(self):
         r"""The global number of degrees of freedom for this function space.
@@ -818,7 +821,7 @@ def local_to_global_map(self, bcs, lgmap=None):
             else:
                 indices = lgmap.block_indices.copy()
                 bsize = lgmap.getBlockSize()
-                assert bsize == self.value_size
+                assert bsize == self.block_size
         else:
             # MatBlock case, LGMap is already unrolled.
             indices = lgmap.block_indices.copy()
@@ -827,11 +830,11 @@ def local_to_global_map(self, bcs, lgmap=None):
         nodes = []
         for bc in bcs:
             if bc.function_space().component is not None:
-                nodes.append(bc.nodes * self.value_size
+                nodes.append(bc.nodes * self.block_size
                              + bc.function_space().component)
             elif unblocked:
-                tmp = bc.nodes * self.value_size
-                for i in range(self.value_size):
+                tmp = bc.nodes * self.block_size
+                for i in range(self.block_size):
                     nodes.append(tmp + i)
             else:
                 nodes.append(bc.nodes)
@@ -1299,9 +1302,9 @@ def ComponentFunctionSpace(parent, component):
     """
    element = parent.ufl_element()
    assert type(element) in frozenset([finat.ufl.VectorElement, finat.ufl.TensorElement])
-    if not (0 <= component < parent.value_size):
+    if not (0 <= component < parent.block_size):
         raise IndexError("Invalid component %d. not in [0, %d)" %
-                         (component, parent.value_size))
+                         (component, parent.block_size))
     new = ProxyFunctionSpace(parent.mesh(), element.sub_elements[0], name=parent.name)
     new.identifier = "component"
     new.component = component
@@ -1345,7 +1348,7 @@ def make_dof_dset(self):
     def make_dat(self, val=None, valuetype=None, name=None):
         r"""Return a newly allocated :class:`pyop2.types.glob.Global`
         representing the data for a :class:`.Function` on this space."""
-        return op2.Global(self.value_size, val, valuetype, name, self._comm)
+        return op2.Global(self.block_size, val, valuetype, name, self._comm)
 
     def entity_node_map(self, source_mesh, source_integral_type, source_subdomain_id, source_all_integer_subdomain_ids):
         return None
diff --git a/firedrake/mg/embedded.py b/firedrake/mg/embedded.py
index cb0e723a26..d6a3b72a94 100644
--- a/firedrake/mg/embedded.py
+++ b/firedrake/mg/embedded.py
@@ -111,7 +111,7 @@ def V_dof_weights(self, V):
         # global Vector counting the number of cells that see each
         # dof.
         f = firedrake.Function(V)
-        firedrake.par_loop(("{[i, j]: 0 <= i < A.dofs and 0 <= j < %d}" % V.value_size,
+        firedrake.par_loop(("{[i, j]: 0 <= i < A.dofs and 0 <= j < %d}" % V.block_size,
                            "A[i, j] = A[i, j] + 1"),
                            firedrake.dx,
                            {"A": (f, firedrake.INC)})
diff --git a/firedrake/preconditioners/asm.py b/firedrake/preconditioners/asm.py
index 86100138e6..19738fe438 100644
--- a/firedrake/preconditioners/asm.py
+++ b/firedrake/preconditioners/asm.py
@@ -81,8 +81,8 @@ def initialize(self, pc):
             tinyasm.SetASMLocalSubdomains(
                 asmpc, ises,
                 [W.dm.getDefaultSF() for W in V],
-                [W.value_size for W in V],
-                sum(W.value_size * W.dof_dset.total_size for W in V))
+                [W.block_size for W in V],
+                sum(W.block_size * W.dof_dset.total_size for W in V))
             asmpc.setUp()
         else:
             raise ValueError(f"Unknown backend type {backend}")
@@ -168,7 +168,7 @@ def get_patches(self, V):
                         continue
                     off = section.getOffset(p)
                     # Local indices within W
-                    W_indices = slice(off*W.value_size, W.value_size * (off + dof))
+                    W_indices = slice(off*W.block_size, W.block_size * (off + dof))
                     indices.extend(V_local_ises_indices[i][W_indices])
             iset = PETSc.IS().createGeneral(indices, comm=PETSc.COMM_SELF)
             ises.append(iset)
@@ -247,7 +247,7 @@ def get_patches(self, V):
                         continue
                     off = section.getOffset(p)
                     # Local indices within W
-                    W_indices = slice(off*W.value_size, W.value_size * (off + dof))
+                    W_indices = slice(off*W.block_size, W.block_size * (off + dof))
                     indices.extend(V_local_ises_indices[i][W_indices])
             iset = PETSc.IS().createGeneral(indices, comm=PETSc.COMM_SELF)
             ises.append(iset)
@@ -296,7 +296,7 @@ def get_patches(self, V):
                 if dof <= 0:
                     continue
                 off = section.getOffset(p)
-                indices = numpy.arange(off*V.value_size, V.value_size * (off + dof), dtype=IntType)
+                indices = numpy.arange(off*V.block_size, V.block_size * (off + dof), dtype=IntType)
                 iset = PETSc.IS().createGeneral(indices, comm=PETSc.COMM_SELF)
                 ises.append(iset)
 
@@ -471,7 +471,7 @@ def get_patches(self, V):
                 else:
                     begin = off + min(k, k+plane) * blayer_offset + dof
                     end = off + max(k, k+plane) * blayer_offset
-                    zlice = slice(W.value_size * begin, W.value_size * end)
+                    zlice = slice(W.block_size * begin, W.block_size * end)
                     indices.extend(iset[zlice])
 
             iset = PETSc.IS().createGeneral(indices, comm=PETSc.COMM_SELF)
diff --git a/firedrake/preconditioners/facet_split.py b/firedrake/preconditioners/facet_split.py
index a1c43dca7c..4c8e654ce4 100644
--- a/firedrake/preconditioners/facet_split.py
+++ b/firedrake/preconditioners/facet_split.py
@@ -258,7 +258,7 @@ def get_restriction_indices(V, W):
     eperm = numpy.concatenate((eperm, numpy.setdiff1d(numpy.arange(vsize, dtype=PETSc.IntType), eperm)))
     pmap = PermutedMap(V.cell_node_map(), eperm)
 
-    wsize = sum(Vsub.finat_element.space_dimension() * Vsub.value_size for Vsub in W)
+    wsize = sum(Vsub.finat_element.space_dimension() * Vsub.block_size for Vsub in W)
     kernel_code = f"""
         void copy(PetscInt *restrict w, const PetscInt *restrict v) {{
             for (PetscInt i=0; i<{wsize}; i++) w[i] = v[i];
diff --git a/firedrake/preconditioners/fdm.py b/firedrake/preconditioners/fdm.py
index 53d03c2a7f..82e56057e7 100644
--- a/firedrake/preconditioners/fdm.py
+++ b/firedrake/preconditioners/fdm.py
@@ -225,10 +225,10 @@ def allocate_matrix(self, Amat, V, J, bcs, fcp, pmat_type, use_static_condensati
             raise ValueError("Expecting at most one FunctionSpace restricted onto facets.")
         self.embedding_element = ebig
 
-        if Vbig.value_size == 1:
+        if Vbig.block_size == 1:
             self.fises = PETSc.IS().createGeneral(fdofs, comm=COMM_SELF)
         else:
-            self.fises = PETSc.IS().createBlock(Vbig.value_size, fdofs, comm=COMM_SELF)
+            self.fises = PETSc.IS().createBlock(Vbig.block_size, fdofs, comm=COMM_SELF)
 
         # Create data structures needed for assembly
         self.lgmaps = {Vsub: Vsub.local_to_global_map([bc for bc in bcs if bc.function_space() == Vsub]) for Vsub in V}
@@ -245,7 +245,7 @@ def allocate_matrix(self, Amat, V, J, bcs, fcp, pmat_type, use_static_condensati
                 self.schur_kernel[V] = SchurComplementPattern
             elif Vfacet and use_static_condensation:
                 # If we are in a facet space, we build the Schur complement on its diagonal block
-                if Vfacet.finat_element.formdegree == 0 and Vfacet.value_size == 1:
+                if Vfacet.finat_element.formdegree == 0 and Vfacet.block_size == 1:
                     self.schur_kernel[Vfacet] = SchurComplementDiagonal
                     interior_pc_type = PETSc.PC.Type.JACOBI
                 elif symmetric:
@@ -560,7 +560,7 @@ def assemble_reference_tensor(self, V, transpose=False, sort_interior=False):
         :returns: a :class:`PETSc.Mat` interpolating V^k * d(V^k) onto
                   broken(V^k) * broken(V^{k+1}) on the reference element.
         """
-        value_size = V.value_size
+        bsize = V.block_size
         fe = V.finat_element
         tdim = fe.cell.get_spatial_dimension()
         formdegree = fe.formdegree
@@ -570,7 +570,7 @@ def assemble_reference_tensor(self, V, transpose=False, sort_interior=False):
         if formdegree == tdim:
             degree = degree + 1
         is_interior, is_facet = is_restricted(fe)
-        key = (value_size, tdim, degree, formdegree, is_interior, is_facet, transpose, sort_interior)
+        key = (bsize, tdim, degree, formdegree, is_interior, is_facet, transpose, sort_interior)
         cache = self._cache.setdefault("reference_tensor", {})
         try:
             return cache[key]
@@ -637,8 +637,8 @@ def assemble_reference_tensor(self, V, transpose=False, sort_interior=False):
             A00.destroy()
             A11.destroy()
             A10.destroy()
-        if value_size != 1:
-            eye = petsc_sparse(numpy.eye(value_size), comm=result.getComm())
+        if bsize != 1:
+            eye = petsc_sparse(numpy.eye(bsize), comm=result.getComm())
             temp = result
             result = temp.kron(eye)
             temp.destroy()
@@ -1352,10 +1352,10 @@ def __init__(self, kernel, name=None):
         V = FunctionSpace(Q.mesh(), unrestrict_element(Q.ufl_element()))
         V0 = FunctionSpace(Q.mesh(), restrict_element(V.ufl_element(), "interior"))
         V1 = FunctionSpace(Q.mesh(), restrict_element(V.ufl_element(), "facet"))
-        idofs = PETSc.IS().createBlock(V.value_size, restricted_dofs(V0.finat_element, V.finat_element), comm=comm)
-        fdofs = PETSc.IS().createBlock(V.value_size, restricted_dofs(V1.finat_element, V.finat_element), comm=comm)
+        idofs = PETSc.IS().createBlock(V.block_size, restricted_dofs(V0.finat_element, V.finat_element), comm=comm)
+        fdofs = PETSc.IS().createBlock(V.block_size, restricted_dofs(V1.finat_element, V.finat_element), comm=comm)
         size = idofs.size + fdofs.size
-        assert size == V.finat_element.space_dimension() * V.value_size
+        assert size == V.finat_element.space_dimension() * V.block_size
         # Bilinear form on the space with interior and interface
         a = form if Q == V else form(*(t.reconstruct(function_space=V) for t in args))
         # Generate code to apply the action of A within the Schur complement action
@@ -1621,15 +1621,14 @@ def tabulate_exterior_derivative(Vc, Vf, cbcs=[], fbcs=[], comm=None):
     fises.destroy()
     cises.destroy()
 
-    if Vf.value_size > 1:
+    if Vf.block_size > 1:
         temp = Dhat
-        eye = petsc_sparse(numpy.eye(Vf.value_size, dtype=PETSc.RealType), comm=temp.getComm())
+        eye = petsc_sparse(numpy.eye(Vf.block_size, dtype=PETSc.RealType), comm=temp.getComm())
         Dhat = temp.kron(eye)
         temp.destroy()
         eye.destroy()
 
     sizes = tuple(V.dof_dset.layout_vec.getSizes() for V in (Vf, Vc))
-    block_size = Vf.dof_dset.layout_vec.getBlockSize()
     preallocator = PETSc.Mat().create(comm=comm)
     preallocator.setType(PETSc.Mat.Type.PREALLOCATOR)
     preallocator.setSizes(sizes)
@@ -1647,7 +1646,7 @@ def tabulate_exterior_derivative(Vc, Vf, cbcs=[], fbcs=[], comm=None):
     nnz = get_preallocation(preallocator, sizes[0][0])
     preallocator.destroy()
 
-    Dmat = PETSc.Mat().createAIJ(sizes, block_size, nnz=nnz, comm=comm)
+    Dmat = PETSc.Mat().createAIJ(sizes, Vf.block_size, nnz=nnz, comm=comm)
     Dmat.setOption(PETSc.Mat.Option.NEW_NONZERO_ALLOCATION_ERR, True)
     assembler.arguments[0].data = Dmat.handle
     assembler()
@@ -1865,8 +1864,7 @@ def cell_to_global(lgmap, cell_to_local, cell_index, result=None):
             result = cell_to_local(cell_index, result=result)
             return lgmap.apply(result, result=result)
 
-        bsize = Vrow.dof_dset.layout_vec.getBlockSize()
-        cell_to_local, nel = extrude_node_map(Vrow.cell_node_map(), bsize=bsize)
+        cell_to_local, nel = extrude_node_map(Vrow.cell_node_map(), bsize=Vrow.block_size)
         get_rindices = partial(cell_to_global, self.lgmaps[Vrow], cell_to_local)
 
         Afdm, Dfdm, bdof, axes_shifts = self.assemble_reference_tensor(Vrow)
@@ -1877,7 +1875,7 @@ def cell_to_global(lgmap, cell_to_local, cell_index, result=None):
         PT_facet = self.coefficients.get("PT_facet")
 
         V = Vrow
-        bsize = V.value_size
+        bsize = V.block_size
         ncomp = V.ufl_element().reference_value_size
         sdim = (V.finat_element.space_dimension() * bsize) // ncomp  # dimension of a single component
         tdim = V.mesh().topological_dimension()
diff --git a/firedrake/preconditioners/patch.py b/firedrake/preconditioners/patch.py
index b74897f574..f7a14c1e2f 100644
--- a/firedrake/preconditioners/patch.py
+++ b/firedrake/preconditioners/patch.py
@@ -580,7 +580,7 @@ def bcdofs(bc, ghost=True):
         if isinstance(Z.ufl_element(), VectorElement):
             offset += idx
             assert i == len(indices)-1  # assert we're at the end of the chain
-            assert Z.sub(idx).value_size == 1
+            assert Z.sub(idx).block_size == 1
         elif isinstance(Z.ufl_element(), MixedElement):
             if ghost:
                 offset += sum(Z.sub(j).dof_count for j in range(idx))
diff --git a/firedrake/preconditioners/pmg.py b/firedrake/preconditioners/pmg.py
index cebbb5f74a..ab6de8c6b7 100644
--- a/firedrake/preconditioners/pmg.py
+++ b/firedrake/preconditioners/pmg.py
@@ -904,19 +904,19 @@ def make_kron_code(Vc, Vf, t_in, t_out, mat_name, scratch):
     shifts = fshifts
     in_place = False
     if len(felems) == len(celems):
-        in_place = all((len(fs)*Vf.value_size == len(cs)*Vc.value_size) for fs, cs in zip(fshifts, cshifts))
-        psize = Vf.value_size
+        in_place = all((len(fs)*Vf.block_size == len(cs)*Vc.block_size) for fs, cs in zip(fshifts, cshifts))
+        psize = Vf.block_size
 
     if not in_place:
         if len(celems) == 1:
-            psize = Vc.value_size
+            psize = Vc.block_size
             pelem = celems[0]
             perm_name = "perm_%s" % t_in
             celems = celems*len(felems)
 
         elif len(felems) == 1:
             shifts = cshifts
-            psize = Vf.value_size
+            psize = Vf.block_size
             pelem = felems[0]
             perm_name = "perm_%s" % t_out
             felems = felems*len(celems)
@@ -924,7 +924,7 @@ def make_kron_code(Vc, Vf, t_in, t_out, mat_name, scratch):
             raise ValueError("Cannot assign fine to coarse DOFs")
 
         if set(cshifts) == set(fshifts):
-            csize = Vc.value_size * Vc.finat_element.space_dimension()
+            csize = Vc.block_size * Vc.finat_element.space_dimension()
             prolong_code.append(f"""
         for({IntType_c} j=1; j<{len(fshifts)}; j++)
             for({IntType_c} i=0; i<{csize}; i++)
@@ -938,8 +938,8 @@ def make_kron_code(Vc, Vf, t_in, t_out, mat_name, scratch):
 
     elif pelem == celems[0]:
         for k in range(len(shifts)):
-            if Vc.value_size*len(shifts[k]) < Vf.value_size:
-                shifts[k] = shifts[k]*(Vf.value_size//Vc.value_size)
+            if Vc.block_size*len(shifts[k]) < Vf.block_size:
+                shifts[k] = shifts[k]*(Vf.block_size//Vc.block_size)
 
         pshape = [e.space_dimension() for e in pelem]
         pargs = ", ".join(map(str, pshape+[1]*(3-len(pshape))))
@@ -1115,7 +1115,7 @@ def make_mapping_code(Q, cmapping, fmapping, t_in, t_out):
     coef_args = "".join([", c%d" % i for i in range(len(coefficients))])
     coef_decl = "".join([", PetscScalar const *restrict c%d" % i for i in range(len(coefficients))])
 
-    qlen = Q.value_size * Q.finat_element.space_dimension()
+    qlen = Q.block_size * Q.finat_element.space_dimension()
     prolong_code = f"""
         for({IntType_c} i=0; i<{qlen}; i++) {t_out}[i] = 0.0E0;
@@ -1221,7 +1221,7 @@ def work_function(self, V):
     @cached_property
     def _weight(self):
         weight = firedrake.Function(self.Vf)
-        size = self.Vf.finat_element.space_dimension() * self.Vf.value_size
+        size = self.Vf.finat_element.space_dimension() * self.Vf.block_size
         kernel_code = f"""
         void weight(PetscScalar *restrict w){{
             for(PetscInt i=0; i<{size}; i++) w[i] += 1.0;
@@ -1350,7 +1350,7 @@ def make_blas_kernels(self, Vf, Vc):
             Qf = firedrake.FunctionSpace(Vf.mesh(), qelem)
             mapping_output = make_mapping_code(Qf, cmapping, fmapping, "t0", "t1")
 
-        qshape = (Qf.value_size, Qf.finat_element.space_dimension())
+        qshape = (Qf.block_size, Qf.finat_element.space_dimension())
         # interpolate to embedding fine space
         decl[0], prolong[0], restrict[0], shapes = make_kron_code(Vc, Qf, "t0", "t1", "J0", "t2")
 
@@ -1375,8 +1375,8 @@ def make_blas_kernels(self, Vf, Vc):
 
         # We could benefit from loop tiling for the transpose, but that makes the code
         # more complicated.
-        fshape = (Vf.value_size, Vf.finat_element.space_dimension())
-        cshape = (Vc.value_size, Vc.finat_element.space_dimension())
+        fshape = (numpy.prod(Vf.shape), Vf.finat_element.space_dimension())
+        cshape = (numpy.prod(Vc.shape), Vc.finat_element.space_dimension())
 
         lwork = numpy.prod([max(*dims) for dims in zip(*shapes)])
         lwork = max(lwork, max(numpy.prod(fshape), numpy.prod(cshape)))
@@ -1468,8 +1468,8 @@ def make_kernels(self, Vf, Vc):
         element_kernel = element_kernel.replace("void expression_kernel", "static void expression_kernel")
         coef_args = "".join([", c%d" % i for i in range(len(coefficients))])
         coef_decl = "".join([", const %s *restrict c%d" % (ScalarType_c, i) for i in range(len(coefficients))])
-        dimc = Vc.finat_element.space_dimension() * Vc.value_size
-        dimf = Vf.finat_element.space_dimension() * Vf.value_size
+        dimc = Vc.finat_element.space_dimension() * Vc.block_size
+        dimf = Vf.finat_element.space_dimension() * Vf.block_size
        restrict_code = f"""
        {element_kernel}