gpt4 book ai didi

json - 使用R将JSON文件转换为CSV文件

转载 作者:行者123 更新时间:2023-12-04 07:34:28 26 4
gpt4 key购买 nike

我将一个JSON文件的内容保存在.txt文件中,尝试将其加载到R中时出现以下错误:

Error in feed_push_parser(readBin(con, raw(), n), reset = TRUE) : 
parse error: trailing garbage
" : "SUCCESS" } /* 1 */ { "_id" : "b736c374-b8ae-4e9
(right here) ------^

我假设错误是由于/*(数字)*/的多个实例引起的,并且我无法手动将其全部删除,因为我的文件具有这些实例的10k实例。有没有办法在将数据加载到R中之前删除此类实例?

我的JSON文件如下所示:
/* 0 */
{
"_id" : "93ccbdb6-8947",
"uiSearchRequest" : {
"travelDate" : 20151206,
"travelDuration" : 7,
"shopperDuration" : 30,
"oneWay" : false,
"userId" : "ATP1KKP",
"queryId" : "93ccbdb6-8947",
"subRequests" : [{
"origin" : "WAS",
"destination" : "LON",
"carrier" : "AA",
"fareClasses" : "",
"owrt" : "1,2"
}]
},
"downloadCount" : 0,
"requestDate" : 20151205,
"totalRecords" : 0,
"status" : "SUCCESS"
}

/* 1 */
{
"_id" : "b736c374-b8ae",
"uiSearchRequest" : {
"travelDate" : 20151206,
"travelDuration" : 7,
"shopperDuration" : 30,
"oneWay" : false,
"userId" : "ATP1KKP",
"queryId" : "b736c374-b8ae",
"subRequests" : [{
"origin" : "WAS",
"destination" : "LON",
"carrier" : "AA",
"fareClasses" : "",
"owrt" : "1,2"
}]
},
"downloadCount" : 0,
"requestDate" : 20151205,
"totalRecords" : 0,
"status" : "SUCCESS"
}

/* 2 */
{
"_id" : "3312605f-8304",
"uiSearchRequest" : {
"travelDate" : 20151206,
"travelDuration" : 7,
"shopperDuration" : 30,
"oneWay" : false,
"userId" : "ATP1SXE",
"queryId" : "3312605f-8304",
"subRequests" : [{
"origin" : "LON",
"destination" : "IAD",
"carrier" : "AA",
"fareClasses" : "",
"owrt" : "1,2"
}]
},
"downloadCount" : 2,
"requestDate" : 20151205,
"totalRecords" : 0,
"status" : "SUCCESS"
}

/* 3 */
{
"_id" : "6b668cfa-9b79",
"uiSearchRequest" : {
"travelDate" : 20151206,
"travelDuration" : 7,
"shopperDuration" : 30,
"oneWay" : false,
"userId" : "ATP1NXA",
"queryId" : "6b668cfa-9b79",
"subRequests" : [{
"origin" : "WAS",
"destination" : "LON",
"carrier" : "AA",
"fareClasses" : "",
"owrt" : "1,2"
}]
},
"downloadCount" : 1,
"requestDate" : 20151205,
"totalRecords" : 1388,
"status" : "SUCCESS"
}

/* 4 */
{
"_id" : "41c373a1-e4cb",
"uiSearchRequest" : {
"travelDate" : 20151206,
"travelDuration" : 7,
"shopperDuration" : 30,
"oneWay" : false,
"userId" : "ATP6CXS",
"queryId" : "41c373a1-e4cb",
"subRequests" : [{
"origin" : "WAS",
"destination" : "LON",
"carrier" : "AA",
"fareClasses" : "",
"owrt" : "1,2"
}]
},
"downloadCount" : 0,
"requestDate" : 20151205,
"totalRecords" : 1388,
"status" : "SUCCESS"
}

/* 5 */
{
"_id" : "2c8331c4-21ca",
"uiSearchRequest" : {
"travelDate" : 20151206,
"travelDuration" : 7,
"shopperDuration" : 30,
"oneWay" : false,
"userId" : "ATP1KKP",
"queryId" : "2c8331c4-21ca",
"subRequests" : [{
"origin" : "WAS",
"destination" : "LON",
"carrier" : "AA",
"fareClasses" : "",
"owrt" : "1,2"
}]
},
"downloadCount" : 0,
"requestDate" : 20151205,
"totalRecords" : 1388,
"status" : "SUCCESS"
}

/* 6 */
{
"_id" : "71a09900-1c13",
"uiSearchRequest" : {
"travelDate" : 20151206,
"travelDuration" : 7,
"shopperDuration" : 30,
"oneWay" : false,
"userId" : "ATP6CXS",
"queryId" : "71a09900-1c13",
"subRequests" : [{
"origin" : "WAS",
"destination" : "LON",
"carrier" : "AF",
"fareClasses" : "",
"owrt" : "1,2"
}, {
"origin" : "WAS",
"destination" : "LON",
"carrier" : "AA",
"fareClasses" : "",
"owrt" : "1,2"
}, {
"origin" : "WAS",
"destination" : "LON",
"carrier" : "DL",
"fareClasses" : "",
"owrt" : "1,2"
}, {
"origin" : "WAS",
"destination" : "LON",
"carrier" : "LH",
"fareClasses" : "",
"owrt" : "1,2"
}, {
"origin" : "WAS",
"destination" : "LON",
"carrier" : "BA",
"fareClasses" : "",
"owrt" : "1,2"
}]
},
"downloadCount" : 0,
"requestDate" : 20151205,
"totalRecords" : 6941,
"status" : "SUCCESS"
}

/* 7 */
{
"_id" : "a036a42a-918b",
"uiSearchRequest" : {
"travelDate" : 20151206,
"travelDuration" : 7,
"shopperDuration" : 30,
"oneWay" : false,
"userId" : "ATP1MMM",
"queryId" : "a036a42a-918b",
"subRequests" : [{
"origin" : "WAS",
"destination" : "LON",
"carrier" : "AA",
"fareClasses" : "",
"owrt" : "1,2"
}]
},
"downloadCount" : 0,
"requestDate" : 20151205,
"totalRecords" : 1388,
"status" : "SUCCESS"
}

/* 8 */
{
"_id" : "c547be36-805c",
"uiSearchRequest" : {
"travelDate" : 20151206,
"travelDuration" : 7,
"shopperDuration" : 30,
"oneWay" : false,
"userId" : "ATP1SXB",
"queryId" : "c547be36-805c",
"subRequests" : [{
"origin" : "CHI",
"destination" : "LON",
"carrier" : "BA",
"fareClasses" : "",
"owrt" : "1,2"
}]
},
"downloadCount" : 2,
"requestDate" : 20151205,
"totalRecords" : 1072,
"status" : "SUCCESS"
}

我的代码如下(尽管我走得还很远):
# Read the JSON records and flatten each one into a row of a matrix,
# then write the result to CSV.
library(jsonlite)
# NOTE: the original also loaded RJSONIO here; doing so masks
# jsonlite::fromJSON with RJSONIO::fromJSON. Load only the package
# whose fromJSON() you actually want.
# library(RJSONIO)
json_data_raw <- fromJSON("mydata.txt")

# Replace NULL fields with NA so unlist() keeps every field,
# then flatten each record to a named character vector.
json_file <- lapply(json_data_raw, function(x) {
  x[sapply(x, is.null)] <- NA
  unlist(x)
})

# BUG FIX: the original used `<--`, which parses as `output <- -do.call(...)`
# and negates (or errors on) the bound matrix; plain `<-` is intended.
output <- do.call("rbind", json_file)
# BUG FIX: the original wrote the undefined object `a`; write `output`.
write.csv(output, file = "json.csv", row.names = FALSE)
file.show("json.csv")

我正在尝试将输出输出到CSV文件中,如下所示

I'm trying to get output into a CSV file like below

最佳答案

您的文本文件存在几个问题。正如您已经注意到的,您需要删除/* 0 */形式的行。但结果仍然是无效的JSON。如果要在一个文件中包含多个JSON对象,则需要将它们存储在数组中。JSON对象是用花括号 { } 括起来的部分,例如,

{
"_id" : "93ccbdb6-8947-4687-8e12-edf4e40d6650",
...
"totalRecords" : 0,
"status" : "SUCCESS"
}

对象数组的结构如下:
[
{
...
},
{
...
}
]

要使文件成形,您需要在对象之间添加逗号并添加方括号。您可以按照以下步骤进行操作:
# Read the raw text and repair it into one valid JSON array.
raw <- readLines("mydata.txt")

# Drop every "/* 0 */"-style comment line.
json <- raw[!grepl("^/\\* [0-9]* \\*/", raw)]

# Every closing brace except the last needs a trailing comma
# to separate consecutive objects.
n <- length(json)
json[-n] <- sub("^}$", "},", json[-n])

# Wrap the whole sequence of objects in square brackets.
json <- c("[", json, "]")

可以通过 fromJSON()读取它,因此我认为它是有效的json:
# Parse the repaired lines; fromJSON() accepts a character vector of
# JSON text as well as a file path or URL.
library(jsonlite)
# NOTE(review): `table` shadows base::table() for the rest of the script.
table <- fromJSON(json)

该表是嵌套的,也就是说,某些表单元格本身包含一个数据框或一个列表。例如,
table[1,2]
## travelDate travelDuration shopperDuration oneWay userId queryId
## 1 20151206 7 30 FALSE ATP1KKP 93ccbdb6-8947-4687-8e12-edf4e40d6650
## subRequests
## 1 WAS, LON, AA, , 1,2

您可以使用 jsonlite 包中的 flatten() 来获得嵌套层级少一层的表
flatten(table)[1:3, c(1, 6, 12)]
## _id uiSearchRequest.travelDate uiSearchRequest.subRequests
## 1 93ccbdb6-8947-4687-8e12-edf4e40d6650 20151206 WAS, LON, AA, , 1,2
## 2 b736c374-b8ae-4e99-8073-9c54517fecd5 20151206 WAS, LON, AA, , 1,2
## 3 3312605f-8304-4ab8-96d6-6e1a03cfbd9e 20151206 LON, IAD, AA, , 1,2

最后一列仍然是列表。您可以通过多种方式来处理此问题。一种可能性是为每个子请求创建一行,在该行中重复所有其他列(_id、downloadCount 等)的内容。(这几乎是您在问题中给出的形式,唯一的区别是:您在重复的列中将单元格留空,而我则重复了内容。)这是可以做到的:
# Flatten one nesting level, then expand the list column so each
# sub-request becomes its own row with the scalar columns repeated.
table <- flatten(fromJSON(json))
# Column 12 is the subRequests list column; everything else is scalar.
# NOTE(review): the index 12 is hard-coded to this schema — confirm it
# still points at the list column if the input fields change.
# FIX: seq_len(nrow(table)) instead of 1:nrow(table), which would
# yield c(1, 0) and fail on an empty table.
tab_list <- lapply(seq_len(nrow(table)),
                   function(i) data.frame(table[i, -12], table[i, 12],
                                          stringsAsFactors = FALSE))
library(dplyr)
# Stack the per-row data frames into a single flat data frame.
flat_table <- bind_rows(tab_list)

第二行创建一个数据框列表。使用 dplyr 中的 bind_rows() 将这些组合为单个数据框。(更准确地说,flat_table 将是 tbl_df,但它与 data.frame 的区别很小。)然后可以按照通常的方式将其写入 csv 文件:
# Write the fully flattened data frame out as a CSV file.
write.csv(flat_table, file = "mydata.csv")

关于json - 使用R将JSON文件转换为CSV文件,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/37996827/

26 4 0
Copyright 2021 - 2024 cfsdn All Rights Reserved 蜀ICP备2022000587号
广告合作:1813099741@qq.com 6ren.com