无法从与laravel的关系中提取数据

时间:2018-05-28 18:14:03

标签: php laravel laravel-5

我正在尝试建立一对多关系,但我收到以下错误

  

未定义属性: stdClass::$client (查看: C:\wamp\www\intranet\resources\views\users\list.blade.php)

问题是我正在使用表中没有#include <cudnn.h> #include <cassert> #include <cstdlib> #include <iostream> #include <opencv2/opencv.hpp> #include <opencv2/dnn.hpp> using namespace cv; using namespace cv::dnn; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } cv::Mat load_image_NCHW(const char* image_path) { cv::Mat image = cv::imread(image_path, cv::IMREAD_COLOR); image.convertTo(image, CV_32FC3); cv::normalize(image,image,0,1, cv::NORM_MINMAX); cv::Mat inputBlob = blobFromImage(image, 1.0f, cv::Size(image.rows,image.cols), cv::Scalar(0,0,0)); return inputBlob; } void save_image(const char* output_filename, float* buffer, int height, int width) { cv::Mat output_image(height, width, CV_32FC3, buffer); // Make negative values zero. cv::threshold(output_image, output_image, /*threshold=*/0, /*maxval=*/0, cv::THRESH_TOZERO); cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX); output_image.convertTo(output_image, CV_8UC3); cv::imwrite(output_filename, output_image); std::cerr << "Wrote output to " << output_filename << std::endl; } int main(int argc, const char* argv[]) { if (argc < 2) { std::cerr << "usage: conv <image> [gpu=0] [sigmoid=0]" << std::endl; std::exit(EXIT_FAILURE); } int gpu_id = (argc > 2) ? std::atoi(argv[2]) : 0; std::cerr << "GPU: " << gpu_id << std::endl; bool with_sigmoid = (argc > 3) ? 
std::atoi(argv[3]) : 0; std::cerr << "With sigmoid: " << std::boolalpha << with_sigmoid << std::endl; // Load the image cv::Mat image = load_image_NCHW(argv[1]); int imgH = 600; int imgW = 561; int inC = 3; // Set GPU to use cudaSetDevice(gpu_id); // Create the cudnn Handle cudnnHandle_t cudnn; checkCUDNN(cudnnCreate(&cudnn)); // Need a descriptor for // The input, kernel, and convolution cudnnTensorDescriptor_t input_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/inC, /*image_height=*/imgH, /*image_width=*/imgW)); cudnnFilterDescriptor_t kernel_descriptor; checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor)); checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/3, /*in_channels=*/inC, /*kernel_height=*/3, /*kernel_width=*/3)); cudnnConvolutionDescriptor_t convolution_descriptor; checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor)); checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, /*computeType=*/CUDNN_DATA_FLOAT)); // Need to compute the output size int batch_size{0}, channels{0}, height{0}, width{0}; checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convolution_descriptor, input_descriptor, kernel_descriptor, &batch_size, &channels, &height, &width)); std::cerr << "Output Image: " << height << " x " << width << " x " << channels << std::endl; // Need an output descriptor cudnnTensorDescriptor_t output_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, 
/*channels=*/3, /*image_height=*/imgH, /*image_width=*/imgW)); // Need to define the forward algorithm cudnnConvolutionFwdAlgo_t convolution_algorithm = CUDNN_CONVOLUTION_FWD_ALGO_FFT; // Have to compute the workspace size size_t workspace_bytes{0}; checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn, input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor, convolution_algorithm, &workspace_bytes)); std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB" << std::endl; assert(workspace_bytes > 0); // Allocate the memory needed for the workspace void* d_workspace{nullptr}; cudaMalloc(&d_workspace, workspace_bytes); // Allocate memory for the batch of images // and copy from host to device int image_bytes = batch_size * channels * height * width * sizeof(float); float* d_input{nullptr}; cudaMalloc(&d_input, image_bytes); cudaMemcpy(d_input, image.ptr<float>(0), image_bytes, cudaMemcpyHostToDevice); // Allocate memory for the output images // Copy from host to device float* d_output{nullptr}; cudaMalloc(&d_output, image_bytes); cudaMemset(d_output, 0, image_bytes); // clang-format off const float kernel_template[3][3] = { {1, 1, 1}, {1, -8, 1}, {1, 1, 1} }; // clang-format on float h_kernel[3][3][3][3]; for (int kernel = 0; kernel < 3; ++kernel) { for (int channel = 0; channel < 3; ++channel) { for (int row = 0; row < 3; ++row) { for (int column = 0; column < 3; ++column) { h_kernel[kernel][channel][row][column] = kernel_template[row][column]; } } } } float* d_kernel{nullptr}; cudaMalloc(&d_kernel, sizeof(h_kernel)); cudaMemcpy(d_kernel, h_kernel, sizeof(h_kernel), cudaMemcpyHostToDevice); // Perform actual convolution const float alpha = 1.0f, beta = 0.0f; checkCUDNN(cudnnConvolutionForward(cudnn, &alpha, input_descriptor, d_input, kernel_descriptor, d_kernel, convolution_descriptor, convolution_algorithm, d_workspace, workspace_bytes, &beta, output_descriptor, d_output)); // If wish to use sigmoid activation if 
(with_sigmoid) { cudnnActivationDescriptor_t activation_descriptor; checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor)); checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor, CUDNN_ACTIVATION_SIGMOID, CUDNN_PROPAGATE_NAN, /*relu_coef=*/0)); checkCUDNN(cudnnActivationForward(cudnn, activation_descriptor, &alpha, output_descriptor, d_output, &beta, output_descriptor, d_output)); cudnnDestroyActivationDescriptor(activation_descriptor); } // Move results to host float* h_output = new float[image_bytes]; cudaMemcpy(h_output, d_output, image_bytes, cudaMemcpyDeviceToHost); save_image("cudnn-out.png", h_output, height, width); // Free memory delete[] h_output; cudaFree(d_kernel); cudaFree(d_input); cudaFree(d_output); cudaFree(d_workspace); cudnnDestroyTensorDescriptor(input_descriptor); cudnnDestroyTensorDescriptor(output_descriptor); cudnnDestroyFilterDescriptor(kernel_descriptor); cudnnDestroyConvolutionDescriptor(convolution_descriptor); cudnnDestroy(cudnn); } 字段的现有数据库,外键也是典型的id

我的模型如下(注意表中没有client_id字段):

Client.php

我的模型class Client extends Model { protected $connection = 'dpnmwin'; protected $table = 'nmundfunc'; public function employee(){ return $this->hasMany('App\Employee'); } }

Employee.php

// Eloquent model for the legacy `nmtrabajador` table (an "employee").
class Employee extends Model
{
    protected $connection = 'dpnmwin';
    protected $table = 'nmtrabajador';

    /**
     * The client this employee belongs to, joined through the legacy
     * foreign-key column `COD_UND` instead of the default `client_id`.
     */
    public function client()
    {
        return $this->belongsTo('App\Client', 'COD_UND');
    }
}

COD_UND字段是nmtrabajador表中指向客户端的外键。

我尝试以这样的方式从nmundfunc关联中获取数据:

但它给我带来了上面的错误,我该如何解决呢?

下面是我在视图中使用的代码(控制器将数据发送到该视图):

{{$user->client->CEN_DESCRI}}

2 个答案:

答案 0 :(得分:1)

你必须以关系为基础。

此代码将返回数据。

如果你有id,那么你可以通过id找到如下

$employee=Employee::find(1);

或者,如果您想获取所有数据,则可以调用所有方法。

Employee::all();

然后你可以按照你在模型中定义的关系来获得它。

$client=$employee->client->CEN_DESCRI;

从实例中检索数据的方式取决于我们使用的方法。在这个答案中,你可以了解为什么会出现类似这样的错误:

Property [title] does not exist on this collection instance

我希望它能奏效。

答案 1 :(得分:0)

如果表没有 'id' 作为主键,您应该在模型中指定主键是什么:

protected $primaryKey = 'your_primary_key';

关系看起来不错。之后你必须确保 $user 是一个已定义的 Employee 实例,因为你的错误可能意味着这个实例根本没有定义。例如,既然你使用的是 list.blade.php,你需要修改控制器的返回值,指明要传递给视图的数据,例如你可以这样做:

return view('users.list', compact('user'));

用户是保存在 '$user' 中的 Employee 实例。

**更新**

首先你要检查你的用户是否被正确检索,你可以通过放置一个 dd($user) 来检查。当你返回一个视图时,你可以将信息传递给它;一个更干净的方式就是我之前写的那样,所以你最终会得到这样的东西:

public function index()
{
    // Fetch every active employee row ('CONDICION' = 'A').
    // NOTE(review): DB::table() returns plain stdClass rows, not Employee
    // models, so Eloquent relations (e.g. $user->client) are NOT available
    // on them — that matches the "Undefined property: stdClass::$client"
    // error above; use Employee::where(...)->get() if the view needs the
    // relation.
    $users = DB::table('nmtrabajador')
                 ->where('CONDICION', '=', 'A')
                 ->get();
    // dd($users) for debugging that you are retrieving the users properly.
    // BUG FIX: compact() takes the variable NAME as a string; the original
    // compact($users) passed the collection itself, so the view never
    // received a $users variable.
    return view('users.list', compact('users'));
}