- html - 出于某种原因,IE8 对我的 Sass 文件中继承的 html5 CSS 不友好?
- JMeter 在响应断言中使用 span 标签的问题
- html - 在 :hover 和 :active 上具有不同效果的 CSS 动画?
- html - 相对于居中的 html 内容固定的 CSS 重复背景?
我正在尝试设置一个 Azure 数据工厂,将我的数据从 AzureSQL 数据库复制和非规范化到另一个 AzureSQL 数据库,以用于数据流的报告/BI 目的,但我遇到了插入日期的问题。
这是我的数据流的定义。
{
"name": "dataflow1",
"properties": {
"type": "MappingDataFlow",
"typeProperties": {
"sources": [
{
"dataset": {
"referenceName": "AzureSqlTable1",
"type": "DatasetReference"
},
"name": "source1"
}
],
"sinks": [
{
"dataset": {
"referenceName": "AzureSqlTable2",
"type": "DatasetReference"
},
"name": "sink1"
}
],
"script": "\n\nsource(output(\n\t\tBucketId as string,\n\t\tStreamId as string,\n\t\tStreamIdOriginal as string,\n\t\tStreamRevision as integer,\n\t\tItems as integer,\n\t\tCommitId as string,\n\t\tCommitSequence as integer,\n\t\tCommitStamp as timestamp,\n\t\tCheckpointNumber as long,\n\t\tDispatched as boolean,\n\t\tHeaders as binary,\n\t\tPayload as binary\n\t),\n\tallowSchemaDrift: true,\n\tvalidateSchema: false,\n\tisolationLevel: 'READ_UNCOMMITTED',\n\tformat: 'table') ~> source1\nsource1 sink(allowSchemaDrift: true,\n\tvalidateSchema: false,\n\tformat: 'table',\n\tdeletable:false,\n\tinsertable:true,\n\tupdateable:false,\n\tupsertable:false,\n\tmapColumn(\n\t\tBucketId,\n\t\tCommitStamp\n\t)) ~> sink1"
}
}
}
{
"name": "AzureSqlTable1",
"properties": {
"linkedServiceName": {
"referenceName": "Source_Test",
"type": "LinkedServiceReference"
},
"annotations": [],
"type": "AzureSqlTable",
"schema": [
{
"name": "BucketId",
"type": "varchar"
},
{
"name": "StreamId",
"type": "char"
},
{
"name": "StreamIdOriginal",
"type": "nvarchar"
},
{
"name": "StreamRevision",
"type": "int",
"precision": 10
},
{
"name": "Items",
"type": "tinyint",
"precision": 3
},
{
"name": "CommitId",
"type": "uniqueidentifier"
},
{
"name": "CommitSequence",
"type": "int",
"precision": 10
},
{
"name": "CommitStamp",
"type": "datetime2",
"scale": 7
},
{
"name": "CheckpointNumber",
"type": "bigint",
"precision": 19
},
{
"name": "Dispatched",
"type": "bit"
},
{
"name": "Headers",
"type": "varbinary"
},
{
"name": "Payload",
"type": "varbinary"
}
],
"typeProperties": {
"tableName": "[dbo].[Commits]"
}
}
}
{
"name": "AzureSqlTable2",
"properties": {
"linkedServiceName": {
"referenceName": "Dest_Test",
"type": "LinkedServiceReference"
},
"annotations": [],
"type": "AzureSqlTable",
"schema": [],
"typeProperties": {
"tableName": "dbo.Test2"
}
}
}
Activity dataflow1 failed: DF-EXEC-1 Conversion failed when converting date and/or time from character string.
com.microsoft.sqlserver.jdbc.SQLServerException: Conversion failed when converting date and/or time from character string.
at com.microsoft.sqlserver.jdbc.SQLServerException.makeFromDatabaseError(SQLServerException.java:258)
at com.microsoft.sqlserver.jdbc.TDSTokenHandler.onEOF(tdsparser.java:256)
at com.microsoft.sqlserver.jdbc.TDSParser.parse(tdsparser.java:108)
at com.microsoft.sqlserver.jdbc.TDSParser.parse(tdsparser.java:28)
at com.microsoft.sqlserver.jdbc.SQLServerBulkCopy.doInsertBulk(SQLServerBulkCopy.java:1611)
at com.microsoft.sqlserver.jdbc.SQLServerBulkCopy.access$200(SQLServerBulkCopy.java:58)
at com.microsoft.sqlserver.jdbc.SQLServerBulkCopy$1InsertBulk.doExecute(SQLServerBulkCopy.java:709)
at com.microsoft.sqlserver.jdbc.TDSCommand.execute(IOBuffer.java:7151)
at com.microsoft.sqlserver.jdbc.SQLServerConnection.executeCommand(SQLServerConnection.java:2478)
at com.microsoft.sqlserver.jdbc.SQLServerBulkCopy.sendBulkLoadBCP(SQLServerBulkCopy.java:739)
at com.microsoft.sqlserver.jdbc.SQLServerBulkCopy.writeToServer(SQLServerBulkCopy.java:1684)
at com.microsoft.sqlserver.jdbc.SQLServerBulkCopy.writeToServer(SQLServerBulkCopy.java:669)
at com.microsoft.azure.sqldb.spark.connect.DataFrameFunctions.com$microsoft$azure$sqldb$spark$connect$DataFrameFunctions$$bulkCopy(DataFrameFunctions.scala:127)
at com.microsoft.azure.sqldb.spark.connect.DataFrameFunctions$$anonfun$bulkCopyToSqlDB$1.apply(DataFrameFunctions.scala:72)
at com.microsoft.azure.sqldb.spark.connect.DataFrameFunctions$$anonfun$bulkCopyToSqlDB$1.apply(DataFrameFunctions.scala:72)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:948)
at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$28.apply(RDD.scala:948)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2226)
at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2226)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
at org.apache.spark.scheduler.Task.run(Task.scala:124)
at org.apache.spark.executor.Executor$TaskRunner$$anonfun$11.apply(Executor.scala:459)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1401)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:465)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
考虑到 VARCHAR(50) 被作为 [CommitStamp] 的类型,这并不奇怪:
INSERT BULK dbo.T_301fcb5e4a4148d4a48f2943011b2f04 (
[BucketId] NVARCHAR(MAX),
[CommitStamp] VARCHAR(50),
[StreamId] NVARCHAR(MAX),
[StreamIdOriginal] NVARCHAR(MAX),
[StreamRevision] INT,
[Items] INT,
[CommitId] NVARCHAR(MAX),
[CommitSequence] INT,
[CheckpointNumber] BIGINT,
[Dispatched] BIT,
[Headers] VARBINARY(MAX),
[Payload] VARBINARY(MAX),
[r8e440f7252bb401b9ead107597de6293] INT)
with (ROWS_PER_BATCH = 4096, TABLOCK)
CommitStamp 作为字符串类型:
source(output(
BucketId as string,
StreamId as string,
StreamIdOriginal as string,
StreamRevision as integer,
Items as integer,
CommitId as string,
CommitSequence as integer,
CommitStamp as timestamp,
CheckpointNumber as long,
Dispatched as boolean,
Headers as binary,
Payload as binary
),
allowSchemaDrift: true,
validateSchema: false,
isolationLevel: 'READ_UNCOMMITTED',
format: 'table',
schemaName: '[dbo]',
tableName: '[Commits]',
store: 'sqlserver',
server: 'sign2025-sqldata.database.windows.net',
database: 'SignPath.Application',
user: 'Sign2025Admin',
password: '**********') ~> source1
source1 sink(allowSchemaDrift: true,
validateSchema: false,
format: 'table',
deletable:false,
insertable:true,
updateable:false,
upsertable:false,
mapColumn(
BucketId,
CommitStamp
),
schemaName: 'dbo',
tableName: 'Test2',
store: 'sqlserver',
server: 'sign2025-sqldata.database.windows.net',
database: 'SignPath.Reporting',
user: 'Sign2025Admin',
password: '**********') ~> sink1
最佳答案
我创建了一个数据流,用于将数据从 Azure SQL 数据库复制到另一个 Azure SQL 数据库。成功地将 datetime2 转换为 VARCHAR(50)。
这是我的数据流的定义:
{
"name": "dataflow1",
"properties": {
"type": "MappingDataFlow",
"typeProperties": {
"sources": [
{
"dataset": {
"referenceName": "DestinationDataset_sto",
"type": "DatasetReference"
},
"name": "source1"
}
],
"sinks": [
{
"dataset": {
"referenceName": "DestinationDataset_mex",
"type": "DatasetReference"
},
"name": "sink1"
}
],
"script": "\n\nsource(output(\n\t\tID as integer,\n\t\ttName as string,\n\t\tmyTime as timestamp\n\t),\n\tallowSchemaDrift: true,\n\tvalidateSchema: false,\n\tisolationLevel: 'READ_UNCOMMITTED',\n\tformat: 'table') ~> source1\nsource1 sink(input(\n\t\tID as integer,\n\t\ttName as string,\n\t\tmyTime as string\n\t),\n\tallowSchemaDrift: true,\n\tvalidateSchema: false,\n\tformat: 'table',\n\tdeletable:false,\n\tinsertable:true,\n\tupdateable:false,\n\tupsertable:false) ~> sink1"
}
}
}
{
"name": "DestinationDataset_sto",
"properties": {
"linkedServiceName": {
"referenceName": "AzureSqlDatabase1",
"type": "LinkedServiceReference"
},
"annotations": [],
"type": "AzureSqlTable",
"schema": [
{
"name": "ID",
"type": "int",
"precision": 10
},
{
"name": "tName",
"type": "varchar"
},
{
"name": "myTime",
"type": "datetime2",
"scale": 7
}
],
"typeProperties": {
"tableName": "[dbo].[demo]"
}
},
"type": "Microsoft.DataFactory/factories/datasets"
}
{
"name": "DestinationDataset_mex",
"properties": {
"linkedServiceName": {
"referenceName": "AzureSqlDatabase1",
"type": "LinkedServiceReference"
},
"annotations": [],
"type": "AzureSqlTable",
"schema": [
{
"name": "ID",
"type": "int",
"precision": 10
},
{
"name": "tName",
"type": "varchar"
},
{
"name": "myTime",
"type": "varchar"
}
],
"typeProperties": {
"tableName": "[dbo].[demo1]"
}
},
"type": "Microsoft.DataFactory/factories/datasets"
}
除了 myTime 列之外,表 demo 和 demo1 几乎具有相同的模式。
从 demo 复制的数据:
source(output(
ID as integer,
tName as string,
myTime as timestamp
),
allowSchemaDrift: true,
validateSchema: true,
isolationLevel: 'SERIALIZABLE',
format: 'table',
schemaName: '[dbo]',
tableName: '[demo]',
store: 'sqlserver',
server: '****.database.windows.net',
database: '****',
user: 'ServerAdmin',
password: '**********') ~> source1
source1 sink(input(
ID as integer,
tName as string,
myTime as string
),
allowSchemaDrift: true,
validateSchema: false,
format: 'table',
deletable:false,
insertable:true,
updateable:false,
upsertable:false,
schemaName: '[dbo]',
tableName: '[demo1]',
store: 'sqlserver',
server: '****.database.windows.net',
database: '****',
user: 'ServerAdmin',
password: '**********') ~> sink1
数据流(Data Flow)可以将 datetime2 转换为 VARCHAR()(也许还有 NVARCHAR()),对 date、datetimeoffset 同样适用。而对于 time、datetime、datetime2、smalldatetime 等类型,数据流总是报错:
"message": "DF-EXEC-1 Conversion failed when converting date and/or time from character
关于azure-sql-database - 为什么 Azure 数据工厂似乎坚持将 DateTimes 作为字符串插入?,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/56948054/
我应该执行以下操作: 可能通过服务/工厂,使用 $q(异步)查询 API 以获取大型名称数据集 有另一个服务(也是异步的),它应该只返回上述工厂的元素,如果它们与某个字符串(搜索字段)匹配。目的是缩小
我有一个通用的基类。我有一个实现基类的具体类。 我将如何创建工厂类/方法来交付不同类型的具体类? 举个例子: public class ReceiverBase where T : IInte
我正在查看以下链接中的 Ninject Factory 扩展: http://www.planetgeek.ch/2011/12/31/ninject-extensions-factory-intro
工厂、提供商和服务这三个术语之间有什么区别? 刚刚了解 NHibernate 及其存储库模式(POCO 类等)。 最佳答案 工厂:通过将一堆位组合在一起或基于某种上下文选择类型来组装类 Provide
使用CGLIB我可以做到 final var enhancer = new Enhancer(); enhancer.setUseCache(false); enhancer.setSuperclas
我试图在 Kotlin 中使用伴随对象工厂方法(相当于 Java 中的静态工厂方法)创建一个嵌套内部类。这是我的代码的简化版本。 class OuterClass { var myData:L
我正在为我的大学做一个项目,但遇到了问题。 基本上,该项目由一个客户端-服务器应用程序组成,我想创建一个用于通信的 Packet 类。数据包由 header 和主体组成。现在问题来了。我可以有一些不同
这个问题在这里已经有了答案: Why doesn't polymorphism work without pointers/references? (6 个答案) What is object sl
我正在制作一个套接字工厂。我希望每个外部应用程序都使用 Socket 类的接口(interface),它是几个类(ServerSocketTCP、ClientSocketTCP、ServerSocke
我是 angularjs 的新手,我正在尝试创建一个小型电影数据库。这是我第一次使用工厂,我想确保这是正确的方法,以及如何在另一个功能中使用这个工厂,如下所示? 我希望这个工厂只运行一次,这样我就可以
这个问题在这里已经有了答案: Java inner class and static nested class (28 个答案) 关闭 5 年前。 public class DataFactory
我看过很多关于 C++ 工厂的帖子,但到目前为止我还没有看到解决我的问题的解决方案。 (虽然我可能遗漏了一些东西。) 示例控制台应用程序: #include #include #include
这是一个简单的 C++ 项目,有 2 种设计模式:单例和工厂,sigleton 也是一个模板化类,一个接口(interface) (IHash) 和一个类 (Hash1)。一个简单的工厂类 (Hash
这个问题类似于Factory and generics ,并且可能有相同的答案,但它是不同的。我有一个通用基类,它将由完全独立的 JAR 中的类进行扩展。所述 JAR 应该能够在不更改任何其他代码的情
问题是我需要为传递的类创建一个新实例 有没有办法重写这个函数,让它可以接受任意数量的参数? function createInstance(ofClass, arg1, arg2, arg3, ...
我想用简单的 C++ 语法创建一个简单的工厂方法: void *createObject(const char *str,...) { if(!strcmp("X",str)) retu
经过大约 10 个月的程序化 PHP 学习后,我现在正尝试着手研究基本的 OOP 原则和设计模式。这是一个爱好,我没有那么多时间去追求它,所以请原谅这个问题的水平很低。 我的网站(目前 100% 程序
我有一个简单的问题。 我如何编写一个工厂来定义使用 make() 或 create() 的关系,具体取决于原始调用 make() 还是 create()? 这是我的用例: 我有一个简单的工厂 /**
我正在尝试在延迟加载模块中提供 APP_BASE_HREF 注入(inject) token ,然而,工厂方法根本没有被调用。 在这里https://github.com/MaurizioCascia
我有以下 ast: import { factory as f } from 'typescript' const typeDeclaration = f.createTypeAliasDeclara
我是一名优秀的程序员,十分优秀!